Merge remote-tracking branch 'origin/trunk' into ambari-rest-api-explorer
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js
index e1b74aa..3a9ad67 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/clusters/ClustersManageAccessCtrl.js
@@ -37,7 +37,7 @@
       });
       var orderedRoles = Cluster.orderedRoles;
       var pms = [];
-      for (var key in orderedRoles) {
+      for (var key=0;key<orderedRoles.length;key++) {
         pms.push($scope.permissions[orderedRoles[key]]);
       }
       $scope.permissions = pms;
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index b3c27dc..69c35c0 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -29,6 +29,7 @@
   $scope.stackIds = [];
   $scope.allVersions = [];
   $scope.networkLost = false;
+  $scope.stackRepoUpdateLinkExists = true;
   $scope.skipValidation = false;
   $scope.useRedhatSatellite = false;
 
@@ -543,6 +544,23 @@
     })[0];
   };
 
+  /**
+   * Set $scope.stackRepoUpdateLinkExists to true if at least one of the given
+   * versions has the repo URL link in its repoinfo.xml, false otherwise.
+   */
+  $scope.setStackRepoUpdateLinkExists = function (versions) {
+    var stackRepoUpdateLinkExists = versions.find(function(_version){
+      return _version.stackRepoUpdateLinkExists;
+    });
+
+    // Found at least one version with the stack repo update link
+    if (stackRepoUpdateLinkExists){
+      $scope.stackRepoUpdateLinkExists = true;
+    } else {
+      $scope.stackRepoUpdateLinkExists = false;
+    }
+  };
+
   $scope.setNetworkIssues = function (versions) {
    $scope.networkLost = !versions.find(function(_version){
      return !_version.stackDefault;
@@ -576,6 +594,7 @@
         $scope.selectedPublicRepoVersion = $scope.activeStackVersion;
         $scope.setVersionSelected($scope.activeStackVersion);
         $scope.setNetworkIssues(versions);
+        $scope.setStackRepoUpdateLinkExists(versions);
         $scope.validateRepoUrl();
         $scope.availableStackRepoList = versions.length == 1 ? [] : versions;
       }
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
index c6ba241..a84a97c 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsEditCtrl.js
@@ -24,15 +24,15 @@
     os: $t('versions.os')
   };
   $scope.editController = true;
-  $scope.osList = [];
+  $scope.osList = []; // view model for displaying the repo URLs of the various OSes
   $scope.skipValidation = false;
   $scope.useRedhatSatellite = false;
-  $scope.selectedOS = 0;
   $scope.upgradeStack = {
     stack_name: '',
     stack_version: '',
     display_name: ''
   };
+  $scope.defaulfOSRepos = {}; // a copy of the initially loaded repo info, used for the "changed" check later
 
   $scope.loadStackVersionInfo = function () {
     return Stack.getRepo($routeParams.versionId, $routeParams.stackName).then(function (response) {
@@ -54,8 +54,6 @@
             var skipServices = ['MAPREDUCE2', 'GANGLIA', 'KERBEROS'];
             return skipServices.indexOf(service.name) === -1;
           }) || [];
-      //save default values of repos to check if they were changed
-      $scope.defaulfOSRepos = {};
       response.updateObj.operating_systems.forEach(function(os) {
         $scope.defaulfOSRepos[os.OperatingSystems.os_type] = {};
         os.repositories.forEach(function(repo) {
@@ -84,8 +82,6 @@
       } else {
         $scope.deleteEnabled = $scope.isDeletable();
       }
-      // fetch all repos to display the left menu
-      $scope.fetchRepos();
     });
   };
 
@@ -133,33 +129,34 @@
     });
   };
 
-  $scope.defaulfOSRepos = {};
-
   $scope.save = function () {
     $scope.editVersionDisabled = true;
     delete $scope.updateObj.href;
     $scope.updateObj.operating_systems = [];
-    var updateRepoUrl = false;
+    // check if there is any change in the repo list
+    var changed = false;
     angular.forEach($scope.osList, function (os) {
       var savedUrls = $scope.defaulfOSRepos[os.OperatingSystems.os_type];
-      os.OperatingSystems.ambari_managed_repositories = !$scope.useRedhatSatellite;
-      if (os.selected) {
-        var currentRepos = os.repositories;
-        var urlChanged = false;
-        angular.forEach(currentRepos, function (repo) {
-          if (repo.Repositories.base_url != savedUrls[repo.Repositories.repo_id]) {
-            urlChanged = true;
-          }
-        });
-        if (!savedUrls || urlChanged) {
-          updateRepoUrl = true;
+      if (os.selected) { // currently shown?
+        if (savedUrls) { // initially loaded?
+          angular.forEach(os.repositories, function (repo) {
+            if (repo.Repositories.base_url != savedUrls[repo.Repositories.repo_id]) {
+              changed = true; // modified
+            }
+          });
+        } else {
+          changed = true; // added
         }
+        os.OperatingSystems.ambari_managed_repositories = !$scope.useRedhatSatellite;
         $scope.updateObj.operating_systems.push(os);
-      } else if (savedUrls) {
-        updateRepoUrl = true;
+      } else {
+        if (savedUrls) {
+          changed = true; // removed
+        }
       }
     });
-    if (updateRepoUrl && !$scope.deleteEnabled) {
+    // show confirmation when making changes to current/installed repo
+    if (changed && !$scope.deleteEnabled) {
       ConfirmationModal.show(
           $t('versions.changeBaseURLConfirmation.title'),
           $t('versions.changeBaseURLConfirmation.message'),
@@ -342,64 +339,5 @@
     return hasErrors;
   };
 
-
-  // add all repos list
-  $scope.filter = {
-    version: '',
-    cluster: {
-      options: [],
-      current: null
-    }
-  };
-
-  $scope.pagination = {
-    totalRepos: 100,
-    maxVisiblePages: 1,
-    itemsPerPage: 100,
-    currentPage: 1
-  };
-  $scope.allRepos = [];
-  $scope.stackVersions = [];
-
-
-
-  /**
-   *  Formatted object to display all repos:
-   *
-   *  [{ 'name': 'HDP-2.3',
-   *     'repos': ['2.3.6.0-2343', '2.3.4.1', '2.3.4.0-56']
-   *   },
-   *   { 'name': 'HDP-2.2',
-   *     'repos': ['2.2.6.0', '2.2.4.5', '2.2.4.0']
-   *   }
-   *  ]
-   *
-   */
-  $scope.fetchRepos = function () {
-    return Stack.allRepos($scope.filter, $scope.pagination).then(function (repos) {
-      $scope.allRepos = repos.items.sort(function(a, b){return a.repository_version < b.repository_version});
-      var existingStackHash = {};
-      var stackVersions = [];
-      angular.forEach($scope.allRepos, function (repo) {
-        var stackVersionName = repo.stack_name + '-' + repo.stack_version;
-        var currentStackVersion = $scope.upgradeStack.stack_name + '-' + $scope.upgradeStack.stack_version;
-        repo.isActive = $scope.actualVersion == repo.repository_version;
-        if (!existingStackHash[stackVersionName]) {
-          existingStackHash[stackVersionName] = true;
-          stackVersions.push({
-            'name': stackVersionName,
-            'isOpened': stackVersionName == currentStackVersion,
-            'repos': [repo]
-          });
-        } else {
-          if (stackVersions[stackVersions.length -1].repos) {
-            stackVersions[stackVersions.length -1].repos.push(repo);
-          }
-        }
-      });
-      $scope.stackVersions = stackVersions;
-    });
-  };
-
   $scope.loadStackVersionInfo();
 }]);
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
index fba8538..fd2c6e5 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/i18n.config.js
@@ -307,7 +307,7 @@
     'users.showAll': 'Show all users',
     'users.showAdmin': 'Show only admin users',
     'users.groupMembership': 'Group Membership',
-    'users.userNameTip': 'Only alpha-numeric characters, up to 80 characters',
+    'users.userNameTip': 'Maximum length is 80 characters. \\, &, |, <, >, ` are not allowed.',
 
     'users.changeStatusConfirmation.title': 'Change Status',
     'users.changeStatusConfirmation.message': 'Are you sure you want to change status for user "{{userName}}" to {{status}}?',
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
index e028906..b496987 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/services/Stack.js
@@ -88,7 +88,7 @@
     },
 
     allPublicStackVersions: function() {
-      var url = '/version_definitions?fields=VersionDefinition/stack_default,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
+      var url = '/version_definitions?fields=VersionDefinition/stack_default,VersionDefinition/stack_repo_update_link_exists,operating_systems/repositories/Repositories/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
         '&VersionDefinition/show_available=true';
       var deferred = $q.defer();
       $http.get(Settings.baseUrl + url, {mock: 'version/versions.json'})
@@ -100,6 +100,7 @@
               stackName: version.VersionDefinition.stack_name,
               stackVersion: version.VersionDefinition.stack_version,
               stackDefault: version.VersionDefinition.stack_default,
+              stackRepoUpdateLinkExists: version.VersionDefinition.stack_repo_update_link_exists,
               stackNameVersion:  version.VersionDefinition.stack_name + '-' + version.VersionDefinition.stack_version,
               displayName: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version.split('-')[0], //HDP-2.3.4.0
               displayNameFull: version.VersionDefinition.stack_name + '-' + version.VersionDefinition.repository_version, //HDP-2.3.4.0-23
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
index d62ae15..3bdb80e 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
@@ -18,51 +18,79 @@
 <div class="panel panel-default mainpage">
   <div class="panel-body">
     <h1>{{'main.title' | translate}}</h1>
+
     <div ng-if="isLoaded" id="main-operations-boxes" class="row thumbnails">
       <p ng-hide="cluster">{{'main.noClusterDescription' | translate}}</p>
+
       <p ng-show="cluster">{{'main.hasClusterDescription' | translate}}</p>
 
-        <!--Clusters-->
+      <!--Clusters-->
       <div ng-show="cluster" class="col-sm-11 thumbnail">
         <h4 class="title">{{'main.operateCluster.title' | translate}}</h4>
+
         <div class="description">{{'main.operateCluster.description' | translate}}</div>
         <div class="glyphicon glyphicon-cloud"></div>
         <div class="buttons">
         <span ng-class="{active: isActive('clusters.manageAccess')}">
-          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary permission-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' | translate}}</a>
-          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'" href="#/clusters/{{cluster.Clusters.cluster_name}}/userAccessList" class="btn btn-primary permission-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' | translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary permission-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' |
+            translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'"
+             href="#/clusters/{{cluster.Clusters.cluster_name}}/userAccessList"
+             class="btn btn-primary permission-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' |
+            translate}}</a>
         </span>
         <span>
-          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary go-dashboard-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' | translate}}</a>
-          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'" href="{{fromSiteRoot('/#/main/dashboard/metrics')}}" class="btn btn-primary go-dashboard-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' | translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href
+             class="btn btn-primary go-dashboard-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' |
+            translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'"
+             href="{{fromSiteRoot('/#/main/dashboard/metrics')}}" class="btn btn-primary go-dashboard-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' |
+            translate}}</a>
         </span>
         </div>
       </div>
       <div ng-hide="cluster" class="col-sm-11 thumbnail">
         <h4 class="title">{{'main.createCluster.title' | translate}}</h4>
+
         <div class="description">{{'main.createCluster.description' | translate}}</div>
         <div class="glyphicon glyphicon-cloud"></div>
-        <div class="buttons"> <a href="{{fromSiteRoot('/#/installer/step0')}}" class="btn btn-primary create-cluster-button">{{'main.createCluster.launchInstallWizard' | translate}}</a></div>
+        <div class="buttons"><a href="{{fromSiteRoot('/#/installer/step0')}}"
+                                class="btn btn-primary create-cluster-button">{{'main.createCluster.launchInstallWizard'
+          | translate}}</a></div>
       </div>
 
-        <!--Manage Users and groups-->
+      <!--Manage Users and groups-->
       <div class="col-sm-5 thumbnail">
         <h4 class="title">{{'main.manageUsersAndGroups.title' | translate}}</h4>
+
         <div class="description">{{'main.manageUsersAndGroups.description' | translate}}</div>
         <div class="glyphicon glyphicon-user"></div>
         <div class="buttons">
-          <span ng-class="{active: isActive('users.list')}"><link-to route="users.list" class="btn btn-primary userslist-button">{{'common.users' | translate}}</link-to></span>
-          <span ng-class="{active: isActive('groups.list')}"><link-to route="groups.list" class="btn btn-primary groupslist-button">{{'common.groups' | translate}}</link-to></span>
+          <span ng-class="{active: isActive('users.list')}"><link-to route="users.list"
+                                                                     class="btn btn-primary userslist-button">
+            {{'common.users' | translate}}
+          </link-to></span>
+          <span ng-class="{active: isActive('groups.list')}"><link-to route="groups.list"
+                                                                      class="btn btn-primary groupslist-button">
+            {{'common.groups' | translate}}
+          </link-to></span>
         </div>
       </div>
 
-        <!--Deploy Views-->
+      <!--Deploy Views-->
       <div class="col-sm-5 thumbnail">
         <h4 class="title">{{'main.deployViews.title' | translate}}</h4>
+
         <div class="description">{{'main.deployViews.description' | translate}}</div>
         <div class="glyphicon glyphicon-th"></div>
-        <div ng-class="{active: isActive('views.list')}" class="buttons"><link-to route="views.list" class="btn btn-primary viewslist-button">{{'common.views' | translate}}</link-to></div>
+        <div ng-class="{active: isActive('views.list')}" class="buttons">
+          <link-to route="views.list" class="btn btn-primary viewslist-button">{{'common.views' | translate}}</link-to>
         </div>
       </div>
+    </div>
   </div>
 </div>
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index 3bee2a1..fe08802 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -115,8 +115,7 @@
   </div>
 
   <div id="upload-definition-file-panel" ng-if="createController">
-
-    <div class="col-sm-12 big-radio clearfix" ng-class="{'disabled' : networkLost || useRedhatSatellite}">
+    <div class="col-sm-12 big-radio clearfix hide-soft" ng-class="{'disabled' : networkLost || useRedhatSatellite,'visible':stackRepoUpdateLinkExists}">
       <input type="radio" ng-model="selectedOption.index" value="1" ng-change="togglePublicLocalOptionSelect()" ng-disabled="networkLost || useRedhatSatellite">
       <span>{{'versions.usePublic' | translate}}</span>
       <a id="public-disabled-link" href="javascript:void(0);" ng-if="networkLost" ng-click="showPublicRepoDisabledDialog()">{{'versions.networkIssues.networkLost'| translate}}</a>
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/users/create.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/users/create.html
index cc5d8d4..80a3b04 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/users/create.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/users/create.html
@@ -34,7 +34,7 @@
         placeholder="{{'users.userName' | translate}}"
         ng-model="user.user_name"
         ng-required="true"
-        ng-pattern="/^\w*$/"
+        ng-pattern="/^[^<>&`|\\]+$/"
         ng-maxlength="80"
         tooltip="{{'users.userNameTip' | translate}}"
         autocomplete="off"
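Note: the new ng-pattern /^[^<>&`|\\]+$/ replaces the old alphanumeric-only /^\w*$/, so user names may now contain any character except <, >, &, backtick, | and backslash, in line with the updated users.userNameTip string. A minimal sketch of the same rule in Python (the helper name is illustrative, not part of the patch):

    import re

    # Same character class as the new ng-pattern, plus the ng-maxlength cap.
    USER_NAME_RE = re.compile(r'^[^<>&`|\\]+$')

    def is_valid_user_name(name):
        return bool(name) and len(name) <= 80 and USER_NAME_RE.match(name) is not None

    assert is_valid_user_name("jane.doe-1")        # dots and dashes now pass
    assert not is_valid_user_name("evil`whoami`")  # backticks are rejected
    assert not is_valid_user_name("a" * 81)        # over the 80-character limit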
diff --git a/ambari-agent/conf/unix/install-helper.sh b/ambari-agent/conf/unix/install-helper.sh
index 0e32d0a..c30aab1 100644
--- a/ambari-agent/conf/unix/install-helper.sh
+++ b/ambari-agent/conf/unix/install-helper.sh
@@ -70,6 +70,7 @@
   chmod a+x $AMBARI_AGENT_VAR
   
   chmod 1777 $AMBARI_AGENT_VAR/tmp
+  chmod 700 $AMBARI_AGENT_VAR/keys
   chmod 700 $AMBARI_AGENT_VAR/data
 
   #TODO we need this when upgrading from pre 2.4 versions to 2.4, remove this when upgrade from pre 2.4 versions will be
@@ -89,7 +90,7 @@
   rm -f "$PYTHON_WRAPER_TARGET"
 
   AMBARI_PYTHON=""
-  python_binaries=( "/usr/bin/python" "/usr/bin/python2" "/usr/bin/python2.7", "/usr/bin/python2.6" )
+  python_binaries=( "/usr/bin/python" "/usr/bin/python2" "/usr/bin/python2.7" "/usr/bin/python2.6" )
   for python_binary in "${python_binaries[@]}"
   do
     $python_binary -c "import sys ; ver = sys.version_info ; sys.exit(not (ver >= (2,6) and ver<(3,0)))" 1>/dev/null 2>/dev/null
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index a57ed64..c1cb056 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -330,6 +330,7 @@
                     <include>/tools/*.jar</include>
                     <include>/cache/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
                     <include>/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
+                    <include>/cache/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
                     <include>/cache/common-services/STORM/0.9.1/package/files/wordCount.jar</include>
                   </includes>
                 </source>
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 75880c6..1eda5c2 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -498,16 +498,14 @@
 
   def execute_status_command_and_security_status(self, command):
     component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
-    component_security_status_result = self.customServiceOrchestrator.requestComponentSecurityState(command)
-
-    return command, component_status_result, component_security_status_result
+    return command, component_status_result
 
   def process_status_command_result(self, result):
     '''
     Executes commands of type STATUS_COMMAND
     '''
     try:
-      command, component_status_result, component_security_status_result = result
+      command, component_status_result = result
       cluster = command['clusterName']
       service = command['serviceName']
       component = command['componentName']
@@ -548,9 +546,6 @@
       if self.controller.recovery_manager.enabled():
         result['sendExecCmdDet'] = str(request_execution_cmd)
 
-      # Add security state to the result
-      result['securityState'] = component_security_status_result
-
       if component_extra is not None and len(component_extra) != 0:
         if component_extra.has_key('alerts'):
           result['alerts'] = component_extra['alerts']
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index cf48189..95e4712 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -27,6 +27,7 @@
 
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
 logger = logging.getLogger(__name__)
 
 content = """
@@ -74,9 +75,8 @@
 
 """.format(ps=os.sep)
 
-
 servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
+  'GLUSTERFS': 'glusterd.pid$',
   'NAMENODE': 'hadoop-{USER}-namenode.pid$',
   'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
   'DATANODE': 'hadoop-{USER}-datanode.pid$',
@@ -97,13 +97,13 @@
   'KERBEROS_SERVER': 'kadmind.pid',
   'HIVE_SERVER': 'hive-server.pid',
   'HIVE_METASTORE': 'hive.pid',
-  'HIVE_SERVER_INTERACTIVE' : 'hive-interactive.pid',
+  'HIVE_SERVER_INTERACTIVE': 'hive-interactive.pid',
   'MYSQL_SERVER': 'mysqld.pid',
   'HUE_SERVER': '/var/run/hue/supervisor.pid',
   'WEBHCAT_SERVER': 'webhcat.pid',
 }
 
-#Each service, which's pid depends on user should provide user mapping
+# Each service whose pid depends on the user should provide a user mapping
 servicesToLinuxUser = {
   'NAMENODE': 'hdfs_user',
   'SECONDARY_NAMENODE': 'hdfs_user',
@@ -120,30 +120,30 @@
 }
 
 pidPathVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-   'defaultValue' : '/var/run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'hbase_pid_dir',
-   'defaultValue' : '/var/run/hbase'},
-  {'var' : 'zk_pid_dir',
-   'defaultValue' : '/var/run/zookeeper'},
-  {'var' : 'oozie_pid_dir',
-   'defaultValue' : '/var/run/oozie'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'hive_pid_dir',
-   'defaultValue' : '/var/run/hive'},
-  {'var' : 'mysqld_pid_dir',
-   'defaultValue' : '/var/run/mysqld'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop-mapreduce'},
+  {'var': 'glusterfs_pid_dir_prefix',
+   'defaultValue': '/var/run'},
+  {'var': 'hadoop_pid_dir_prefix',
+   'defaultValue': '/var/run/hadoop'},
+  {'var': 'hadoop_pid_dir_prefix',
+   'defaultValue': '/var/run/hadoop'},
+  {'var': 'hbase_pid_dir',
+   'defaultValue': '/var/run/hbase'},
+  {'var': 'zk_pid_dir',
+   'defaultValue': '/var/run/zookeeper'},
+  {'var': 'oozie_pid_dir',
+   'defaultValue': '/var/run/oozie'},
+  {'var': 'hcat_pid_dir',
+   'defaultValue': '/var/run/webhcat'},
+  {'var': 'hive_pid_dir',
+   'defaultValue': '/var/run/hive'},
+  {'var': 'mysqld_pid_dir',
+   'defaultValue': '/var/run/mysqld'},
+  {'var': 'hcat_pid_dir',
+   'defaultValue': '/var/run/webhcat'},
+  {'var': 'yarn_pid_dir_prefix',
+   'defaultValue': '/var/run/hadoop-yarn'},
+  {'var': 'mapred_pid_dir_prefix',
+   'defaultValue': '/var/run/hadoop-mapreduce'},
 ]
 
 
@@ -323,14 +323,37 @@
     if reg_resp and AmbariConfig.AMBARI_PROPERTIES_CATEGORY in reg_resp:
       if not self.has_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY):
         self.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
-      for k,v in reg_resp[AmbariConfig.AMBARI_PROPERTIES_CATEGORY].items():
+      for k, v in reg_resp[AmbariConfig.AMBARI_PROPERTIES_CATEGORY].items():
         self.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, k, v)
         logger.info("Updating config property (%s) with value (%s)", k, v)
     pass
 
-  def get_force_https_protocol(self):
+  def get_force_https_protocol_name(self):
+    """
+    Get forced https protocol name.
+
+    :return: protocol name, PROTOCOL_TLSv1 by default
+    """
     return self.get('security', 'force_https_protocol', default="PROTOCOL_TLSv1")
 
+  def get_force_https_protocol_value(self):
+    """
+    Get the forced https protocol value that corresponds to the ssl module constant.
+
+    :return: protocol value
+    """
+    import ssl
+    return getattr(ssl, self.get_force_https_protocol_name())
+
+  def get_ca_cert_file_path(self):
+    """
+    Get path to file with trusted certificates.
+
+    :return: trusted certificates file path
+    """
+    return self.get('security', 'ca_cert_path', default="")
+
+
 def isSameHostList(hostlist1, hostlist2):
   is_same = True
 
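Note: get_force_https_protocol_value() resolves the configured protocol name to the matching constant in the standard ssl module via getattr, so a value such as "PROTOCOL_TLSv1_2" in [security] force_https_protocol maps straight to ssl.PROTOCOL_TLSv1_2. A short sketch of the lookup (standalone, with a literal name in place of the config read):

    import ssl

    name = "PROTOCOL_TLSv1"      # default of get_force_https_protocol_name()
    value = getattr(ssl, name)   # resolves to ssl.PROTOCOL_TLSv1
    assert value == ssl.PROTOCOL_TLSv1

    # A misspelled protocol name in the agent config would surface here
    # as an AttributeError rather than being silently ignored.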
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index 78b5c0c..0297f74 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -29,6 +29,7 @@
 import urllib2
 import pprint
 from random import randint
+import re
 import subprocess
 import functools
 
@@ -587,7 +588,9 @@
     Stack Upgrade.
     """
     try:
-      if compare_versions(self.version, "2.1.2") >= 0:
+      version = self.get_version()
+      logger.debug("Ambari Agent version {0}".format(version))
+      if compare_versions(version, "2.1.2") >= 0:
         source_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
         destination_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
         if os.path.exists(source_file) and not os.path.exists(destination_file):
@@ -601,9 +604,16 @@
           return_code = subprocess.call(command, shell=True)
           logger.info("Return code: %d" % return_code)
     except Exception, e:
-      logger.info("Exception in move_data_dir_mount_file(). Error: {0}".format(str(e)))
+      logger.error("Exception in move_data_dir_mount_file(). Error: {0}".format(str(e)))
 
-
+  def get_version(self):
+    version = self.version
+    matches = re.findall(r"[\d+.]+", version)
+    if not matches:
+      logger.warning("No version match result, using original version {0}".format(version))
+      return version
+    else:
+      return matches[0]
 
 def main(argv=None):
   # Allow Ctrl-C
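Note: get_version() strips any non-numeric suffix from self.version before the compare_versions() call, so the comparison sees a plain dotted version even when the agent reports something like "1.2.3.4_MyAgent". A standalone sketch of the same extraction (function name reused for clarity only):

    import re

    def get_version(version):
        # r"[\d+.]+" grabs the leading run of digits and dots; the class
        # also admits a literal '+', which Ambari version strings don't use.
        matches = re.findall(r"[\d+.]+", version)
        return matches[0] if matches else version

    assert get_version("1.2.3.4_MyAgent") == "1.2.3.4"
    assert get_version("11.2.13.10_MyAgent") == "11.2.13.10"
    assert get_version("MyAgent") == "MyAgent"  # no match: original returned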
diff --git a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
index a67e16e..7dd00de 100644
--- a/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
+++ b/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
@@ -47,7 +47,6 @@
   SCRIPT_TYPE_PYTHON = "PYTHON"
   COMMAND_TYPE = "commandType"
   COMMAND_NAME_STATUS = "STATUS"
-  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
   CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
   CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'
 
@@ -63,7 +62,7 @@
   AMBARI_SERVER_PORT = "ambari_server_port"
   AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl"
 
-  FREQUENT_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
+  FREQUENT_COMMANDS = [COMMAND_NAME_STATUS]
   DONT_DEBUG_FAILURES_FOR_COMMANDS = FREQUENT_COMMANDS
   REFLECTIVELY_RUN_COMMANDS = FREQUENT_COMMANDS # -- commands which run a lot and often (this increases their speed)
   DONT_BACKUP_LOGS_FOR_COMMANDS = FREQUENT_COMMANDS
@@ -82,7 +81,8 @@
   def __init__(self, config, controller):
     self.config = config
     self.tmp_dir = config.get('agent', 'prefix')
-    self.force_https_protocol = config.get_force_https_protocol()
+    self.force_https_protocol = config.get_force_https_protocol_name()
+    self.ca_cert_file_path = config.get_ca_cert_file_path()
     self.exec_tmp_dir = Constants.AGENT_TMP_DIR
     self.file_cache = FileCache(config)
     self.status_commands_stdout = os.path.join(self.tmp_dir,
@@ -396,7 +396,7 @@
       for py_file, current_base_dir in filtered_py_file_list:
         log_info_on_failure = not command_name in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
         script_params = [command_name, json_path, current_base_dir, tmpstrucoutfile, logger_level, self.exec_tmp_dir,
-                         self.force_https_protocol]
+                         self.force_https_protocol, self.ca_cert_file_path]
         
         if log_out_files:
           script_params.append("-o")
@@ -467,36 +467,6 @@
                           override_output_files=override_output_files)
     return res
 
-  def requestComponentSecurityState(self, command):
-    """
-     Determines the current security state of the component
-     A command will be issued to trigger the security_status check and the result of this check will
-     returned to the caller. If the component lifecycle script has no security_status method the
-     check will return non zero exit code and "UNKNOWN" will be returned.
-    """
-    override_output_files=True # by default, we override status command output
-    if logger.level == logging.DEBUG:
-      override_output_files = False
-    security_check_res = self.runCommand(command, self.status_commands_stdout,
-                                         self.status_commands_stderr, self.COMMAND_NAME_SECURITY_STATUS,
-                                         override_output_files=override_output_files)
-    result = 'UNKNOWN'
-
-    if security_check_res is None:
-      logger.warn("The return value of the security_status check was empty, the security status is unknown")
-    elif 'exitcode' not in security_check_res:
-      logger.warn("Missing 'exitcode' value from the security_status check result, the security status is unknown")
-    elif security_check_res['exitcode'] != 0:
-      logger.debug("The 'exitcode' value from the security_status check result indicated the check routine failed to properly execute, the security status is unknown")
-    elif 'structuredOut' not in security_check_res:
-      logger.warn("Missing 'structuredOut' value from the security_status check result, the security status is unknown")
-    elif 'securityState' not in security_check_res['structuredOut']:
-      logger.warn("Missing 'securityState' value from the security_status check structuredOut data set, the security status is unknown")
-    else:
-      result = security_check_res['structuredOut']['securityState']
-
-    return result
-
   def resolve_script_path(self, base_dir, script):
     """
     Encapsulates logic of script location determination.
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 8cb8a28..696438e 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -45,11 +45,13 @@
   LINUX_PATH_SEP = "/"
 
   def __init__(self, config):
+    logger.info("Initializing host system information.")
     self.hardware = {
       'mounts': Hardware.osdisks()
     }
     self.config = config
     self.hardware.update(Facter(self.config).facterInfo())
+    logger.info("Host system information: %s", self.hardware)
 
   @classmethod
   def _parse_df_line(cls, line):
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 4b7bfd7..5f96df5 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -68,9 +68,22 @@
     return 'unknown'
 
   def checkLiveServices(self, services, result):
+    is_redhat7_or_higher = False
+    is_redhat = False
+
+    if OSCheck.is_redhat_family():
+      is_redhat = True
+      if int(OSCheck.get_os_major_version()) >= 7:
+        is_redhat7_or_higher = True
+
     for service in services:
       svcCheckResult = {}
-      svcCheckResult['name'] = " or ".join(service)
+      if "ntpd" in service and is_redhat7_or_higher:
+        svcCheckResult['name'] = "chronyd"
+      elif "chronyd" in service and is_redhat:
+        svcCheckResult['name'] = "ntpd"
+      else:
+        svcCheckResult['name'] = " or ".join(service)
       svcCheckResult['status'] = "UNKNOWN"
       svcCheckResult['desc'] = ""
       try:
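Note: the substitution above only changes the display name reported by the live-services check: on a Red Hat family OS at major version 7 or later an "ntpd" entry is reported as "chronyd", while a "chronyd" entry on any Red Hat release is reported as "ntpd". A standalone sketch of the branch logic as written (the is_redhat flags precomputed as in the patch):

    def live_service_display_name(service, is_redhat, is_redhat7_or_higher):
        # 'service' is a tuple of candidate service names, as in checkLiveServices()
        if "ntpd" in service and is_redhat7_or_higher:
            return "chronyd"            # RHEL/CentOS 7+ ships chronyd
        elif "chronyd" in service and is_redhat:
            return "ntpd"
        return " or ".join(service)

    assert live_service_display_name(("ntpd",), True, True) == "chronyd"
    assert live_service_display_name(("chronyd",), True, False) == "ntpd"
    assert live_service_display_name(("ntpd", "chronyd"), False, False) == "ntpd or chronyd"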
diff --git a/ambari-agent/src/main/python/ambari_agent/NetUtil.py b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
index 9b29633..fe32efe 100644
--- a/ambari-agent/src/main/python/ambari_agent/NetUtil.py
+++ b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
@@ -29,7 +29,10 @@
 
 logger = logging.getLogger(__name__)
 
-ensure_ssl_using_protocol(AmbariConfig.get_resolved_config().get_force_https_protocol())
+ensure_ssl_using_protocol(
+  AmbariConfig.get_resolved_config().get_force_https_protocol_name(),
+  AmbariConfig.get_resolved_config().get_ca_cert_file_path()
+)
 
 class NetUtil:
 
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py b/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
index 142e7ca..f42e134 100644
--- a/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
+++ b/ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
@@ -83,280 +83,5 @@
   def kill(self, reason=None, can_relaunch=True):
     pass
 
-class MultiProcessStatusCommandsExecutor(StatusCommandsExecutor):
-  def __init__(self, config, actionQueue):
-    self.config = config
-    self.actionQueue = actionQueue
-
-    self.can_relaunch = True
-
-    # used to prevent queues from been used during creation of new one to prevent threads messing up with combination of
-    # old and new queues
-    self.usage_lock = threading.RLock()
-    # protects against simultaneous killing/creating from different threads.
-    self.kill_lock = threading.RLock()
-
-    self.status_command_timeout = int(self.config.get('agent', 'status_command_timeout', 5))
-    self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
-
-    self.worker_process = None
-    self.mustDieEvent = multiprocessing.Event()
-    self.timedOutEvent = multiprocessing.Event()
-
-    # multiprocessing stuff that need to be cleaned every time
-    self.mp_result_queue = multiprocessing.Queue()
-    self.mp_result_logs = multiprocessing.Queue()
-    self.mp_task_queue = multiprocessing.Queue()
-
-  def _drain_queue(self, target_queue, max_time=5, max_empty_count=15, read_break=.001):
-    """
-    Read everything that available in queue. Using not reliable multiprocessing.Queue methods(qsize, empty), so contains
-    extremely dumb protection against blocking too much at this method: will try to get all possible items for not more
-    than ``max_time`` seconds; will return after ``max_empty_count`` calls of ``target_queue.get(False)`` that raised
-    ``Queue.Empty`` exception. Notice ``read_break`` argument, with default values this method will be able to read
-    ~4500 ``range(1,10000)`` objects for 5 seconds. So don't fill queue too fast.
-
-    :param target_queue: queue to read from
-    :param max_time: maximum time to spend in this method call
-    :param max_empty_count: maximum allowed ``Queue.Empty`` in a row
-    :param read_break: time to wait before next read cycle iteration
-    :return: list of resulting objects
-    """
-    results = []
-    _empty = 0
-    _start = time.time()
-    with self.usage_lock:
-      try:
-        while (not target_queue.empty() or target_queue.qsize() > 0) and time.time() - _start < max_time and _empty < max_empty_count:
-          try:
-            results.append(target_queue.get(False))
-            _empty = 0
-            time.sleep(read_break) # sleep a little to get more accurate empty and qsize results
-          except Queue.Empty:
-            _empty += 1
-          except IOError:
-            pass
-          except UnicodeDecodeError:
-            pass
-      except IOError:
-        pass
-    return results
-
-  def _log_message(self, level, message, exception=None):
-    """
-    Put log message to logging queue. Must be used only for logging from child process(in _worker_process_target).
-
-    :param level:
-    :param message:
-    :param exception:
-    :return:
-    """
-    result_message = "StatusCommandExecutor reporting at {0}: ".format(time.time()) + message
-    self.mp_result_logs.put((level, result_message, exception))
-
-  def _process_logs(self):
-    """
-    Get all available at this moment logs and prints them to logger.
-    """
-    for level, message, exception in self._drain_queue(self.mp_result_logs):
-      if level == logging.ERROR:
-        logger.debug(message, exc_info=exception)
-      if level == logging.WARN:
-        logger.warn(message)
-      if level == logging.INFO:
-        logger.info(message)
-
-  def _worker_process_target(self):
-    """
-    Internal method that running in separate process.
-    """
-    # cleanup monkey-patching results in child process, as it causing problems
-    import subprocess
-    reload(subprocess)
-    import multiprocessing
-    reload(multiprocessing)
-
-    bind_debug_signal_handlers()
-    self._log_message(logging.INFO, "StatusCommandsExecutor process started")
-
-    # region StatusCommandsExecutor process internals
-    internal_in_queue = Queue.Queue()
-    internal_out_queue = Queue.Queue()
-
-    def _internal_worker():
-      """
-      thread that actually executes status commands
-      """
-      while True:
-        _cmd = internal_in_queue.get()
-        internal_out_queue.put(self.actionQueue.execute_status_command_and_security_status(_cmd))
-
-    worker = threading.Thread(target=_internal_worker)
-    worker.daemon = True
-    worker.start()
-
-    def _internal_process_command(_command):
-      internal_in_queue.put(_command)
-      start_time = time.time()
-      result = None
-      while not self.mustDieEvent.is_set() and not result and time.time() - start_time < self.status_command_timeout:
-        try:
-          result = internal_out_queue.get(timeout=1)
-        except Queue.Empty:
-          pass
-
-      if result:
-        self.mp_result_queue.put(result)
-        return True
-      else:
-        # do not set timed out event twice
-        if not self.timedOutEvent.is_set():
-          self._set_timed_out(_command)
-        return False
-
-    # endregion
-
-    try:
-      while not self.mustDieEvent.is_set():
-        try:
-          command = self.mp_task_queue.get(False)
-        except Queue.Empty:
-          # no command, lets try in other loop iteration
-          time.sleep(.1)
-          continue
-
-        self._log_message(logging.DEBUG, "Running status command for {0}".format(command['componentName']))
-
-        if _internal_process_command(command):
-          self._log_message(logging.DEBUG, "Completed status command for {0}".format(command['componentName']))
-
-    except Exception as e:
-      self._log_message(logging.ERROR, "StatusCommandsExecutor process failed with exception:", e)
-      raise
-
-    self._log_message(logging.INFO, "StatusCommandsExecutor subprocess finished")
-
-  def _set_timed_out(self, command):
-    """
-    Set timeout event and adding log entry for given command.
-
-    :param command:
-    :return:
-    """
-    msg = "Command {0} for {1} is running for more than {2} seconds. Terminating it due to timeout.".format(
-        command['commandType'],
-        command['componentName'],
-        self.status_command_timeout
-    )
-    self._log_message(logging.WARN, msg)
-    self.timedOutEvent.set()
-
-  def put_commands(self, commands):
-    """
-    Put given commands to command executor.
-
-    :param commands: status commands to execute
-    :return:
-    """
-    with self.usage_lock:
-      for command in commands:
-        logger.info("Adding " + command['commandType'] + " for component " + \
-                    command['componentName'] + " of service " + \
-                    command['serviceName'] + " of cluster " + \
-                    command['clusterName'] + " to the queue.")
-        self.mp_task_queue.put(command)
-        logger.debug(pprint.pformat(command))
-
-  def process_results(self):
-    """
-    Process all the results from the SCE worker process.
-    """
-    self._process_logs()
-    results = self._drain_queue(self.mp_result_queue)
-    logger.debug("Drained %s status commands results, ~%s remains in queue", len(results), self.mp_result_queue.qsize())
-    for result in results:
-      try:
-        self.actionQueue.process_status_command_result(result)
-      except UnicodeDecodeError:
-        pass
-
-  @property
-  def need_relaunch(self):
-    """
-    Indicates if process need to be relaunched due to timeout or it is dead or even was not created.
-
-    :return: tuple (bool, str|None) with flag to relaunch and reason of relaunch
-    """
-    if not self.worker_process or not self.worker_process.is_alive():
-      return True, "WORKER_DEAD"
-    elif self.timedOutEvent.is_set():
-      return True, "COMMAND_TIMEOUT"
-    return False, None
-
-  def relaunch(self, reason=None):
-    """
-    Restart status command executor internal process.
-
-    :param reason: reason of restart
-    :return:
-    """
-    with self.kill_lock:
-      logger.info("Relaunching child process reason:" + str(reason))
-      if self.can_relaunch:
-        self.kill(reason)
-        self.worker_process = multiprocessing.Process(target=self._worker_process_target)
-        self.worker_process.start()
-        logger.info("Started process with pid {0}".format(self.worker_process.pid))
-      else:
-        logger.debug("Relaunch does not allowed, can not relaunch")
-
-  def kill(self, reason=None, can_relaunch=True):
-    """
-    Tries to stop command executor internal process for sort time, otherwise killing it. Closing all possible queues to
-    unblock threads that probably blocked on read or write operations to queues. Must be called from threads different
-    from threads that calling read or write methods(get_log_messages, get_results, put_commands).
-
-    :param can_relaunch: indicates if StatusCommandsExecutor can be relaunched after this kill
-    :param reason: reason of killing
-    :return:
-    """
-    with self.kill_lock:
-      self.can_relaunch = can_relaunch
-
-      if not self.can_relaunch:
-        logger.info("Killing without possibility to relaunch...")
-
-      # try graceful stop, otherwise hard-kill
-      if self.worker_process and self.worker_process.is_alive():
-        self.mustDieEvent.set()
-        self.worker_process.join(timeout=3)
-        if self.worker_process.is_alive():
-          os.kill(self.worker_process.pid, signal.SIGKILL)
-          logger.info("Child process killed by -9")
-        else:
-          # get log messages only if we died gracefully, otherwise we will have chance to block here forever, in most cases
-          # this call will do nothing, as all logs will be processed in ActionQueue loop
-          self._process_logs()
-          logger.info("Child process died gracefully")
-      else:
-        logger.info("Child process already dead")
-
-      # close queues and acquire usage lock
-      # closing both sides of pipes here, we need this hack in case of blocking on recv() call
-      self.mp_result_queue.close()
-      self.mp_result_queue._writer.close()
-      self.mp_result_logs.close()
-      self.mp_result_logs._writer.close()
-      self.mp_task_queue.close()
-      self.mp_task_queue._writer.close()
-
-      with self.usage_lock:
-        self.mp_result_queue.join_thread()
-        self.mp_result_queue = multiprocessing.Queue()
-        self.mp_task_queue.join_thread()
-        self.mp_task_queue = multiprocessing.Queue()
-        self.mp_result_logs.join_thread()
-        self.mp_result_logs = multiprocessing.Queue()
-        self.customServiceOrchestrator = self.actionQueue.customServiceOrchestrator
-        self.mustDieEvent.clear()
-        self.timedOutEvent.clear()
+# TODO: make a reliable MultiProcessStatusCommandsExecutor implementation
+MultiProcessStatusCommandsExecutor = SingleProcessStatusCommandsExecutor
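Note: aliasing the class keeps every existing call site working unchanged while the multi-process implementation is reworked; any code that instantiates MultiProcessStatusCommandsExecutor now transparently gets the single-process executor. The pattern, with stand-in classes:

    class SingleProcessStatusCommandsExecutor(object):
        """Stand-in for the real executor."""

    # Same alias as in the patch: both names refer to one class.
    MultiProcessStatusCommandsExecutor = SingleProcessStatusCommandsExecutor

    executor = MultiProcessStatusCommandsExecutor()
    assert type(executor) is SingleProcessStatusCommandsExecutor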
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
index ef144bb..8ce4405 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
@@ -55,7 +55,10 @@
 
 WebResponse = namedtuple('WebResponse', 'status_code time_millis error_msg')
 
-ensure_ssl_using_protocol(AmbariConfig.get_resolved_config().get_force_https_protocol())
+ensure_ssl_using_protocol(
+    AmbariConfig.get_resolved_config().get_force_https_protocol_name(),
+    AmbariConfig.get_resolved_config().get_ca_cert_file_path()
+)
 
 class WebAlert(BaseAlert):
 
diff --git a/ambari-agent/src/main/python/ambari_agent/hostname.py b/ambari-agent/src/main/python/ambari_agent/hostname.py
index 0f5f069..357c6b0 100644
--- a/ambari-agent/src/main/python/ambari_agent/hostname.py
+++ b/ambari-agent/src/main/python/ambari_agent/hostname.py
@@ -23,6 +23,7 @@
 import urllib2
 import logging
 import traceback
+import sys
 
 logger = logging.getLogger(__name__)
 
@@ -52,12 +53,19 @@
       out, err = osStat.communicate()
       if (0 == osStat.returncode and 0 != len(out.strip())):
         cached_hostname = out.strip()
+        logger.info("Read hostname '{0}' using agent:hostname_script '{1}'".format(cached_hostname, scriptname))
       else:
+        logger.warn("Execution of '{0}' failed with exit code {1}. err='{2}'\nout='{3}'".format(scriptname, osStat.returncode, err.strip(), out.strip()))
         cached_hostname = socket.getfqdn()
+        logger.info("Read hostname '{0}' using socket.getfqdn() as '{1}' failed".format(cached_hostname, scriptname))
     except:
       cached_hostname = socket.getfqdn()
+      logger.warn("Unexpected error while retrieving hostname: '{0}', defaulting to socket.getfqdn()".format(sys.exc_info()))
+      logger.info("Read hostname '{0}' using socket.getfqdn().".format(cached_hostname))
   except:
     cached_hostname = socket.getfqdn()
+    logger.info("agent:hostname_script configuration not defined thus read hostname '{0}' using socket.getfqdn().".format(cached_hostname))
+
   cached_hostname = cached_hostname.lower()
   return cached_hostname
 
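Note: with the added logging, hostname() now records which of its three sources produced the result. The fallback order, condensed into a standalone sketch (the real code reads agent:hostname_script from AmbariConfig and uses Popen/communicate; check_output is used here only to keep the sketch short):

    import socket
    import subprocess

    def resolve_hostname(hostname_script=None):
        if hostname_script:
            try:
                out = subprocess.check_output(hostname_script, shell=True)
                if out.strip():
                    return out.strip().lower()  # 1. agent:hostname_script output
            except Exception:
                pass                            # 2. script missing/failed: fall through
        return socket.getfqdn().lower()         # 3. default: socket.getfqdn()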
diff --git a/ambari-agent/src/packages/tarball/all.xml b/ambari-agent/src/packages/tarball/all.xml
index 363941a..c71ffe9 100644
--- a/ambari-agent/src/packages/tarball/all.xml
+++ b/ambari-agent/src/packages/tarball/all.xml
@@ -44,21 +44,25 @@
   <fileSets>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>src/main/python/ambari_agent</directory>
       <outputDirectory>${agent.install.dir}</outputDirectory>
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${project.basedir}/../ambari-common/src/main/python/ambari_commons</directory>
       <outputDirectory>${ambari_commons.install.dir}</outputDirectory>
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${resourceManagementSrcLocation}</directory>
       <outputDirectory>${resource_management.install.dir}</outputDirectory>
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${project.basedir}/../ambari-common/src/main/python/ambari_jinja2/ambari_jinja2</directory>
       <outputDirectory>${jinja.install.dir}</outputDirectory>
       <excludes>
@@ -67,11 +71,13 @@
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${project.basedir}/../ambari-common/src/main/python/ambari_simplejson</directory>
       <outputDirectory>${simplejson.install.dir}</outputDirectory>
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>src/examples</directory>
       <outputDirectory>${lib.dir}/examples</outputDirectory>
     </fileSet>
@@ -92,7 +98,8 @@
 	  </excludes>
     </fileSet>
     <fileSet>
-      <directoryMode>755</directoryMode>
+      <directoryMode>700</directoryMode>
+      <fileMode>700</fileMode>
       <directory>${empty.dir}</directory>
       <outputDirectory>/var/lib/${project.artifactId}/keys</outputDirectory>
 	  <excludes>
@@ -117,11 +124,13 @@
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${target.cache.dir}</directory>
       <outputDirectory>/var/lib/ambari-agent/cache</outputDirectory>
     </fileSet>
     <fileSet>
       <directoryMode>755</directoryMode>
+      <fileMode>755</fileMode>
       <directory>${pluggableStackDefinitionOutput}/custom_actions</directory>
       <outputDirectory>/var/lib/ambari-agent/cache/custom_actions</outputDirectory>
     </fileSet>
diff --git a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
index ab46f96..faa9b81 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestActionQueue.py
@@ -988,12 +988,11 @@
 
     dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
 
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
-    expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN'}
+    expected = {'dummy report': ''}
 
     self.assertEqual(len(report['componentStatus']), 1)
     self.assertEqual(report['componentStatus'][0], expected)
@@ -1019,12 +1018,11 @@
 
     dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp(), True, False)
 
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
     expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN',
                 'sendExecCmdDet': 'True'}
 
     self.assertEqual(len(report['componentStatus']), 1)
@@ -1033,12 +1031,11 @@
     requires_recovery_mock.return_value = True
     command_exists_mock.return_value = True
     
-    result = (self.status_command, {'exitcode': 0 }, 'UNKNOWN')
+    result = (self.status_command, {'exitcode': 0 })
 
     actionQueue.process_status_command_result(result)
     report = actionQueue.result()
     expected = {'dummy report': '',
-                'securityState' : 'UNKNOWN',
                 'sendExecCmdDet': 'False'}
 
     self.assertEqual(len(report['componentStatus']), 1)
@@ -1062,7 +1059,7 @@
       'structuredOut': {'alerts': [ {'name': 'flume_alert'} ] }
     }
     
-    result = (self.status_command_for_alerts, command_return_value, command_return_value)
+    result = (self.status_command_for_alerts, command_return_value)
     
     build_mock.return_value = {'somestatusresult': 'aresult'}
 
diff --git a/ambari-agent/src/test/python/ambari_agent/TestController.py b/ambari-agent/src/test/python/ambari_agent/TestController.py
index 7f5d451..20da81f 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestController.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestController.py
@@ -416,6 +416,20 @@
                         exceptionMessage, str(e))
 
 
+  def test_getVersion(self):
+    self.controller.version = "1.2.3.4_MyAgent"
+    version = self.controller.get_version()
+    self.assertEquals('1.2.3.4', version)
+    self.controller.version = "1.2.3-MyAgent"
+    version = self.controller.get_version()
+    self.assertEquals('1.2.3', version)
+    self.controller.version = "11.2.3-MyAgent"
+    version = self.controller.get_version()
+    self.assertEquals('11.2.3', version)
+    self.controller.version = "11.2.13.10_MyAgent"
+    version = self.controller.get_version()
+    self.assertEquals('11.2.13.10', version)
+
   @patch.object(ExitHelper, "exit")
   @patch.object(threading._Event, "wait")
   @patch("time.sleep")
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index 8e5e9a3..c54ffca 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -569,57 +569,6 @@
     status = orchestrator.requestComponentStatus(status_command)
     self.assertEqual(runCommand_mock.return_value, status)
 
-  @patch.object(CustomServiceOrchestrator, "runCommand")
-  @patch.object(FileCache, "__init__")
-  def test_requestComponentSecurityState(self, FileCache_mock, runCommand_mock):
-    FileCache_mock.return_value = None
-    status_command = {
-      "serviceName" : 'HDFS',
-      "commandType" : "STATUS_COMMAND",
-      "clusterName" : "",
-      "componentName" : "DATANODE",
-      'configurations':{}
-    }
-    dummy_controller = MagicMock()
-    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-    # Test securityState
-    runCommand_mock.return_value = {
-      'exitcode' : 0,
-      'structuredOut' : {'securityState': 'UNSECURED'}
-    }
-
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNSECURED', status)
-
-    # Test case where exit code indicates failure
-    runCommand_mock.return_value = {
-      "exitcode" : 1
-    }
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNKNOWN', status)
-
-  @patch.object(FileCache, "__init__")
-  def test_requestComponentSecurityState_realFailure(self, FileCache_mock):
-    '''
-    Tests the case where the CustomServiceOrchestrator attempts to call a service's security_status
-    method, but fails to do so because the script or method was not found.
-    :param FileCache_mock:
-    :return:
-    '''
-    FileCache_mock.return_value = None
-    status_command = {
-      "serviceName" : 'BOGUS_SERVICE',
-      "commandType" : "STATUS_COMMAND",
-      "clusterName" : "",
-      "componentName" : "DATANODE",
-      'configurations':{}
-    }
-    dummy_controller = MagicMock()
-    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
-
-    status = orchestrator.requestComponentSecurityState(status_command)
-    self.assertEqual('UNKNOWN', status)
-
 
   @patch.object(CustomServiceOrchestrator, "get_py_executor")
   @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index d30020c..5400e26 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -226,8 +226,10 @@
   @patch.object(FacterLinux, "setMemInfoOutput")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
-  def test_facterMemInfoOutput(self, get_os_version_mock, get_os_type_mock, facter_setMemInfoOutput_mock):
+  @patch.object(FacterLinux, "getSystemResourceOverrides")
+  def test_facterMemInfoOutput(self, getSystemResourceOverridesMock, get_os_version_mock, get_os_type_mock, facter_setMemInfoOutput_mock):
 
+    getSystemResourceOverridesMock.return_value = {}
     facter_setMemInfoOutput_mock.return_value = '''
 MemTotal:        1832392 kB
 MemFree:          868648 kB
diff --git a/ambari-agent/src/test/python/ambari_agent/TestShell.py b/ambari-agent/src/test/python/ambari_agent/TestShell.py
index 5dc1899..47923bd 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestShell.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestShell.py
@@ -63,7 +63,7 @@
       test_process = subprocess.Popen(test_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
       time.sleep(0.3) # Delay to allow subprocess to start
       # Check if processes are running
-      ps_cmd = """ps aux """
+      ps_cmd = """ps auxww """
       ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
       (out, err) = ps_process.communicate()
       self.assertTrue(sleep_cmd in out)
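Note: the switch from `ps aux` to `ps auxww` matters because ps truncates each output line to the terminal width unless the `w` flag is doubled; a long sleep command could otherwise be cut off and the assertion above would fail spuriously. A quick standalone check (assumes a POSIX host):

```python
# Demonstrates why the extra 'ww' matters: ps truncates each line to the
# terminal width unless the w flag is doubled, so long command strings can
# be cut off. Assumes a POSIX host.
import subprocess

out, _ = subprocess.Popen("ps auxww", shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE).communicate()
longest = max(len(line) for line in out.splitlines())
print("longest ps line: %d chars (no truncation with ww)" % longest)
```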
diff --git a/ambari-agent/src/test/python/resource_management/TestScript.py b/ambari-agent/src/test/python/resource_management/TestScript.py
index 65f8c2d..d531314 100644
--- a/ambari-agent/src/test/python/resource_management/TestScript.py
+++ b/ambari-agent/src/test/python/resource_management/TestScript.py
@@ -17,27 +17,15 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
-import ConfigParser
-import os
-
-import pprint
-
-from unittest import TestCase
-import threading
-import tempfile
-import time
-from threading import Thread
-
-
 import StringIO
-import sys, logging, pprint
-from ambari_agent import AgentException
+import sys, pprint
 from resource_management.libraries.script import Script
 from resource_management.core.environment import Environment
-from mock.mock import MagicMock, patch
+from mock.mock import patch
+from stacks.utils.RMFTestCase import *
+import logging
 
-
-class TestScript(TestCase):
+class TestScript(RMFTestCase):
 
   def setUp(self):
     # disable stdout
@@ -116,6 +104,40 @@
     self.assertEqual(open_mock.call_count, 3)
     self.assertEqual(Script.structuredOut, {"1": "3", "2": "2"})
 
+  @patch("__builtin__.open")
+  def test_status_commands_clear_structured_out(self, open_mock):
+    """
+    Tests that status commands clear any stored structured output from prior status commands.
+    :param open_mock:
+    :return:
+    """
+    class MagicFile(object):
+      def read(self):
+        return "{}"
+
+      def write(self, data):
+        pass
+
+      def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+      def __enter__(self):
+        return self
+
+    sys.argv = ["", "status", "foo.py", "", "", "INFO", ""]
+    open_mock.side_effect = [MagicFile()]
+
+    try:
+      with Environment(".", test_mode=True) as env:
+        script = Script()
+        Script.structuredOut = { "version" : "old_version" }
+        script.execute()
+    except:
+      pass
+
+    self.assertTrue(open_mock.called)
+    self.assertEquals({}, Script.structuredOut)
+
   def tearDown(self):
     # enable stdout
     sys.stdout = sys.__stdout__
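Note: the MagicFile helper above works because `with open(...)` only needs an object implementing the context-manager protocol plus read/write. A self-contained sketch of the same duck-typing:

```python
# Self-contained sketch of the duck-typed file stub used above: a 'with'
# block only needs __enter__/__exit__, and the caller only needs read().
class MagicFile(object):
    def read(self):
        return "{}"

    def write(self, data):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

with MagicFile() as f:
    assert f.read() == "{}"
```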
diff --git a/ambari-common/src/main/python/ambari_commons/inet_utils.py b/ambari-common/src/main/python/ambari_commons/inet_utils.py
index d44107d..66f6544 100644
--- a/ambari-common/src/main/python/ambari_commons/inet_utils.py
+++ b/ambari-common/src/main/python/ambari_commons/inet_utils.py
@@ -183,23 +183,42 @@
       return '127.0.0.1'
   return address
 
-def ensure_ssl_using_protocol(protocol):
+def ensure_ssl_using_protocol(protocol="PROTOCOL_TLSv1", ca_certs=None):
   """
   Monkey patches the ssl module to force use of the given protocol (TLSv1 by default). Do this in the common module to avoid problems with
   PythonReflectiveExecutor.
+
   :param protocol: one of ("PROTOCOL_SSLv2", "PROTOCOL_SSLv3", "PROTOCOL_SSLv23", "PROTOCOL_TLSv1", "PROTOCOL_TLSv1_1", "PROTOCOL_TLSv1_2")
+  :param ca_certs: path to ca_certs file
   :return:
   """
   from functools import wraps
   import ssl
-  if hasattr(ssl.wrap_socket, "_ambari_patched"):
-    return # do not create chain of wrappers, patch only once
-  def sslwrap(func):
-    @wraps(func)
-    def bar(*args, **kw):
-      import ssl
-      kw['ssl_version'] = getattr(ssl, protocol)
-      return func(*args, **kw)
-    bar._ambari_patched = True
-    return bar
-  ssl.wrap_socket = sslwrap(ssl.wrap_socket)
+
+  if not hasattr(ssl.wrap_socket, "_ambari_patched"):
+    def sslwrap(func):
+      @wraps(func)
+      def bar(*args, **kw):
+        import ssl
+        kw['ssl_version'] = getattr(ssl, protocol)
+        if ca_certs and 'ca_certs' not in kw:
+          kw['ca_certs'] = ca_certs
+          kw['cert_reqs'] = ssl.CERT_REQUIRED
+        return func(*args, **kw)
+      bar._ambari_patched = True
+      return bar
+    ssl.wrap_socket = sslwrap(ssl.wrap_socket)
+
+  # Python 2.7.9+ verifies certificates via a default HTTPS context; patch it as well
+  if hasattr(ssl, "_create_default_https_context"):
+    if not hasattr(ssl._create_default_https_context, "_ambari_patched"):
+      @wraps(ssl._create_default_https_context)
+      def _create_default_https_context_patched():
+        context = ssl.SSLContext(protocol = getattr(ssl, protocol))
+        if ca_certs:
+          context.load_verify_locations(ca_certs)
+          context.verify_mode = ssl.CERT_REQUIRED
+          context.check_hostname = False
+        return context
+      _create_default_https_context_patched._ambari_patched = True
+      ssl._create_default_https_context = _create_default_https_context_patched
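Note: the patching idiom above (wrap once, mark the wrapper) generalizes. A self-contained sketch, independent of ssl, showing how a keyword argument is forced onto every call while the `_ambari_patched` flag prevents stacking wrappers:

```python
# Self-contained sketch of the wrap-once idiom: inject a keyword argument on
# every call, and mark the wrapper so repeated patching cannot stack.
from functools import wraps

def force_kwarg(func, key, value):
    if getattr(func, "_ambari_patched", False):
        return func  # already patched; do not build a chain of wrappers
    @wraps(func)
    def wrapper(*args, **kw):
        kw[key] = value
        return func(*args, **kw)
    wrapper._ambari_patched = True
    return wrapper

def connect(host, ssl_version=None):
    return (host, ssl_version)

connect = force_kwarg(connect, "ssl_version", "PROTOCOL_TLSv1")
connect = force_kwarg(connect, "ssl_version", "PROTOCOL_TLSv1")  # no-op second time
print(connect("example.com"))  # ('example.com', 'PROTOCOL_TLSv1')
```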
diff --git a/ambari-common/src/main/python/ambari_commons/network.py b/ambari-common/src/main/python/ambari_commons/network.py
index 4c589f3..edb9add 100644
--- a/ambari-common/src/main/python/ambari_commons/network.py
+++ b/ambari-common/src/main/python/ambari_commons/network.py
@@ -42,30 +42,20 @@
     self.sock = ssl.wrap_socket(conn_socket, self.key_file, self.cert_file,
                                 ssl_version=self.ssl_version)
 
-def get_http_connection(host, port, https_enabled=False, ca_certs=None):
+def get_http_connection(host, port, https_enabled=False, ca_certs=None, ssl_version=ssl.PROTOCOL_SSLv23):
   if https_enabled:
-    ssl_version = ssl.PROTOCOL_SSLv23
     if ca_certs:
-      ssl_version = check_ssl_certificate_and_return_ssl_version(host, port, ca_certs)
+      check_ssl_certificate_and_return_ssl_version(host, port, ca_certs, ssl_version)
     return HTTPSConnectionWithCustomSslVersion(host, port, ssl_version)
   else:
     return httplib.HTTPConnection(host, port)
 
-def check_ssl_certificate_and_return_ssl_version(host, port, ca_certs):
+def check_ssl_certificate_and_return_ssl_version(host, port, ca_certs, ssl_version=ssl.PROTOCOL_SSLv23):
   try:
-    # Try with TLSv1 first.
-    ssl_version = ssl.PROTOCOL_TLSv1
     ssl.get_server_certificate((host, port), ssl_version=ssl_version, ca_certs=ca_certs)
   except ssl.SSLError as ssl_error:
-    print_warning_msg("Failed to verify the SSL certificate for https://{0}:{1} with CA certificate in {2} using ssl.PROTOCOL_TLSv1."
-                      " Trying to use less secure ssl.PROTOCOL_SSLv23. Error : {3}".format(host, port, ca_certs, str(ssl_error)))
-    try:
-      # Try with SSLv23 only if TLSv1 failed.
-      ssl_version = ssl.PROTOCOL_SSLv23
-      ssl.get_server_certificate((host, port), ssl_version=ssl_version, ca_certs=ca_certs)
-    except ssl.SSLError as ssl_error:
-      raise Fail("Failed to verify the SSL certificate for https://{0}:{1} with CA certificate in {2}. Error : {3}"
-               .format(host, port, ca_certs, str(ssl_error)))
+    raise Fail("Failed to verify the SSL certificate for https://{0}:{1} with CA certificate in {2}. Error : {3}"
+             .format(host, port, ca_certs, str(ssl_error)))
   return ssl_version
 
 
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index ce00f0c..facf186 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -356,11 +356,16 @@
               then the Atlas RPM will not be able to copy its artifacts into /etc/atlas/conf directory and therefore
              prevent Ambari from copying those unmanaged contents into /etc/atlas/$version/0
               '''
-              parent_dir = os.path.dirname(current_dir)
-              if os.path.exists(parent_dir):
-                Link(conf_dir, to=current_dir)
+              component_list = default("/localComponents", [])
+              if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
+                Logger.info("Atlas is installed on this host.")
+                parent_dir = os.path.dirname(current_dir)
+                if os.path.exists(parent_dir):
+                  Link(conf_dir, to=current_dir)
+                else:
+                  Logger.info("Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(conf_dir, current_dir))
               else:
-                Logger.info("Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(conf_dir, current_dir))
+                Logger.info("Will not create symlink from {0} to {1} because Atlas is not installed on this host.".format(conf_dir, current_dir))
             else:
               # Normal path for other packages
               Link(conf_dir, to=current_dir)
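Note: the new guard consults the host's component list before creating the Atlas symlink. A minimal sketch of that check, where the literal list stands in for `default("/localComponents", [])` from the command JSON:

```python
# Sketch of the host-component guard; the literal list stands in for
# default("/localComponents", []) from the command JSON.
component_list = ["DATANODE", "ATLAS_CLIENT"]

if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
    print("Atlas is installed on this host; the conf symlink will be created")
else:
    print("Atlas is not installed on this host; skipping the symlink")
```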
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
index 557db58..72bc5c6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
@@ -62,6 +62,7 @@
 def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
     krb_exec_search_paths, return_only_http_code, caller_label, user,
     connection_timeout = CONNECTION_TIMEOUT_DEFAULT,
+    ca_certs = None,
     kinit_timer_ms=DEFAULT_KERBEROS_KINIT_TIMER_MS, method = '',body='',header=''):
   """
   Makes a curl request using the kerberos credentials stored in a calculated cache file. The
@@ -84,13 +85,20 @@
   :param caller_label: an identifier to give context into the caller of this module (used for logging)
   :param user: the user to invoke the curl command as
   :param connection_timeout: if specified, a connection timeout for curl (default 10 seconds)
+  :param ca_certs: path to certificates
   :param kinit_timer_ms: if specified, the time (in ms), before forcing a kinit even if the
                          klist cache is still valid.
   :return:
   """
 
   import uuid
-
+  # backward compatibility with old code and management packs, etc. All new code needs to pass ca_certs explicitly
+  if ca_certs is None:
+    try:
+      from ambari_agent.AmbariConfig import AmbariConfig
+      ca_certs = AmbariConfig.get_resolved_config().get_ca_cert_file_path()
+    except:
+      pass
   # start off false
   is_kinit_required = False
 
@@ -174,13 +182,16 @@
   connection_timeout = int(connection_timeout)
   maximum_timeout = connection_timeout + 2
 
+  ssl_options = ['-k']
+  if ca_certs:
+    ssl_options = ['--cacert', ca_certs]
   try:
     if return_only_http_code:
-      _, curl_stdout, curl_stderr = get_user_call_output(['curl', '--location-trusted', '-k', '--negotiate', '-u', ':', '-b', cookie_file, '-c', cookie_file, '-w',
+      _, curl_stdout, curl_stderr = get_user_call_output(['curl', '--location-trusted'] + ssl_options + ['--negotiate', '-u', ':', '-b', cookie_file, '-c', cookie_file, '-w',
                              '%{http_code}', url, '--connect-timeout', str(connection_timeout), '--max-time', str(maximum_timeout), '-o', '/dev/null'],
                              user=user, env=kerberos_env)
     else:
-      curl_command = ['curl', '--location-trusted', '-k', '--negotiate', '-u', ':', '-b', cookie_file, '-c', cookie_file,
+      curl_command = ['curl', '--location-trusted'] + ssl_options + ['--negotiate', '-u', ':', '-b', cookie_file, '-c', cookie_file,
                       url, '--connect-timeout', str(connection_timeout), '--max-time', str(maximum_timeout)]
       # returns response body
       if len(method) > 0 and len(body) == 0 and len(header) == 0:
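Note: the ssl_options list lets the same curl invocation either skip verification (`-k`) or pin a CA bundle (`--cacert`). A sketch of how it composes into the final argv (the URL and bundle path are illustrative):

```python
# Sketch of how ssl_options composes into the final curl argv: '-k' skips
# verification when no CA bundle is configured, '--cacert <file>' pins one.
def build_curl_args(url, ca_certs=None):
    ssl_options = ['--cacert', ca_certs] if ca_certs else ['-k']
    return ['curl', '--location-trusted'] + ssl_options + ['--negotiate', '-u', ':', url]

print(build_curl_args('https://namenode.example.com:50470/jmx'))
print(build_curl_args('https://namenode.example.com:50470/jmx', ca_certs='/etc/pki/ca.pem'))
```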
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py b/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
index 55cf335..b5b804d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/decorator.py
@@ -26,13 +26,15 @@
 from resource_management.core.logger import Logger
 
 
-def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception):
+def retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, timeout_func=None):
   """
   Retry decorator for improved robustness of functions.
-  :param times: Number of times to attempt to call the function.
+  :param times: Number of times to attempt to call the function. Ignored when timeout_func is specified.
   :param sleep_time: Initial sleep time between attempts
   :param backoff_factor: After every failed attempt, multiply the previous sleep time by this factor.
   :param err_class: Exception class to handle
+  :param timeout_func: used when 'times' should be computed instead; this function should return
+         the total number of seconds to retry for, from which the attempt count is derived
   :return: Returns the output of the wrapped function.
   """
   def decorator(function):
@@ -42,6 +44,10 @@
       _backoff_factor = backoff_factor
       _err_class = err_class
 
+      if timeout_func is not None:
+        timeout = timeout_func()
+        _times = timeout // sleep_time  # ensure we end up with an integer
+
       while _times > 1:
         _times -= 1
         try:
@@ -49,7 +55,8 @@
         except _err_class, err:
           Logger.info("Will retry %d time(s), caught exception: %s. Sleeping for %d sec(s)" % (_times, str(err), _sleep_time))
           time.sleep(_sleep_time)
-        if(_sleep_time * _backoff_factor <= max_sleep_time):
+
+        if _sleep_time * _backoff_factor <= max_sleep_time:
           _sleep_time *= _backoff_factor
 
       return function(*args, **kwargs)
@@ -57,15 +64,17 @@
   return decorator
 
 
-def safe_retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, return_on_fail=None):
+def safe_retry(times=3, sleep_time=1, max_sleep_time=8, backoff_factor=1, err_class=Exception, return_on_fail=None, timeout_func=None):
   """
   Retry decorator for improved robustness of functions. Instead of error generation on the last try, will return
   return_on_fail value.
-  :param times: Number of times to attempt to call the function.
+  :param times: Number of times to attempt to call the function. Ignored when timeout_func is specified.
   :param sleep_time: Initial sleep time between attempts
   :param backoff_factor: After every failed attempt, multiply the previous sleep time by this factor.
   :param err_class: Exception class to handle
   :param return_on_fail value to return on the last try
+  :param timeout_func: used when 'times' should be computed instead; this function should return
+         the total number of seconds to retry for, from which the attempt count is derived
   :return: Returns the output of the wrapped function.
   """
   def decorator(function):
@@ -76,6 +85,10 @@
       _err_class = err_class
       _return_on_fail = return_on_fail
 
+      if timeout_func is not None:
+        timeout = timeout_func()
+        _times = timeout // sleep_time  # ensure we end up with an integer
+
       while _times > 1:
         _times -= 1
         try:
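Note: with timeout_func supplied, the decorator budgets attempts from a total timeout rather than a fixed count. A self-contained sketch of that logic (mirrors the diff; names are illustrative):

```python
# Self-contained sketch of the timeout_func logic added above: when supplied,
# it returns a total timeout in seconds and the attempt budget becomes
# timeout // sleep_time, overriding the fixed 'times' count.
import time
from functools import wraps

def retry_sketch(times=3, sleep_time=1, err_class=Exception, timeout_func=None):
    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            attempts = times
            if timeout_func is not None:
                attempts = timeout_func() // sleep_time  # integer attempt budget
            while attempts > 1:
                attempts -= 1
                try:
                    return function(*args, **kwargs)
                except err_class:
                    time.sleep(sleep_time)
            return function(*args, **kwargs)  # last attempt raises on failure
        return wrapper
    return decorator

@retry_sketch(sleep_time=5, timeout_func=lambda: 30)  # 30 // 5 == 6 attempts
def flaky_call():
    return "ok"

print(flaky_call())
```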
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
index 9a4ff5f..dbd0092 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
@@ -23,7 +23,7 @@
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.get_user_call_output import get_user_call_output
 
-def get_value_from_jmx(qry, property, security_enabled, run_user, is_https_enabled):
+def get_value_from_jmx(qry, property, security_enabled, run_user, is_https_enabled, last_retry=True):
   try:
     if security_enabled:
       cmd = ['curl', '--negotiate', '-u', ':', '-s']
@@ -41,5 +41,6 @@
       data_dict = json.loads(data)
       return data_dict["beans"][0][property]
   except:
-    Logger.logger.exception("Getting jmx metrics from NN failed. URL: " + str(qry))
-    return None
\ No newline at end of file
+    if last_retry:
+      Logger.logger.exception("Getting jmx metrics from NN failed. URL: " + str(qry))
+    return None
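Note: the last_retry flag keeps the exception log quiet on intermediate attempts. A sketch of the caller-side pattern (the probe function is hypothetical):

```python
# Sketch of the last_retry pattern: intermediate failures return None quietly;
# only the final attempt logs the exception. The probe function is hypothetical.
def probe(attempt, max_attempts):
    try:
        raise IOError("connection refused")
    except IOError as err:
        if attempt == max_attempts:  # equivalent to last_retry=True
            print("Getting jmx metrics failed: %s" % err)
        return None

for attempt in range(1, 4):
    probe(attempt, 3)  # logs only on the third attempt
```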
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index 665a8e4..8a2ff25 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -51,7 +51,7 @@
   @retry(times=times, sleep_time=sleep_time, backoff_factor=backoff_factor, err_class=Fail)
   def doRetries(hdfs_site, security_enabled, run_user):
     doRetries.attempt += 1
-    active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states_noretries(hdfs_site, security_enabled, run_user)
+    active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states_noretries(hdfs_site, security_enabled, run_user, doRetries.attempt == times)
     Logger.info(
       "NameNode HA states: active_namenodes = {0}, standby_namenodes = {1}, unknown_namenodes = {2}".format(
         active_namenodes, standby_namenodes, unknown_namenodes))
@@ -65,7 +65,7 @@
   doRetries.attempt = 0
   return doRetries(hdfs_site, security_enabled, run_user)
 
-def get_namenode_states_noretries(hdfs_site, security_enabled, run_user):
+def get_namenode_states_noretries(hdfs_site, security_enabled, run_user, last_retry=True):
   """
   return format [('nn1', 'hdfs://hostname1:port1'), ('nn2', 'hdfs://hostname2:port2')] , [....], [....]
   """
@@ -102,7 +102,7 @@
 
       jmx_uri = JMX_URI_FRAGMENT.format(protocol, value)
       
-      state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled)
+      state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
       # If JMX parsing failed
       if not state:
         check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(get_nameservice(hdfs_site), nn_unique_id)
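Note: doRetries tracks its attempt count as a function attribute, which is how `doRetries.attempt == times` can flag the final attempt. A self-contained sketch of that counter pattern:

```python
# Sketch of the function-attribute counter behind doRetries.attempt: state on
# the function object survives across retry invocations, so the final attempt
# (attempt == times) can be detected and passed along as last_retry.
def do_retries(times):
    do_retries.attempt += 1
    return do_retries.attempt == times  # True only on the last attempt

do_retries.attempt = 0
print(do_retries(3))  # False
print(do_retries(3))  # False
print(do_retries(3))  # True
```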
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index c510dac..c80c577 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -49,7 +49,7 @@
                         xa_audit_db_password, ssl_truststore_password,
                         ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True,
                         is_security_enabled = False, is_stack_supports_ranger_kerberos = False,
-                        component_user_principal = None, component_user_keytab = None):
+                        component_user_principal = None, component_user_keytab = None, cred_lib_path_override = None, cred_setup_prefix_override = None):
 
   if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith("/None"):
     if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
@@ -69,8 +69,10 @@
 
   if policymgr_mgr_url.endswith('/'):
     policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-  stack_version = get_stack_version(component_select_name)
-  if stack_version_override is not None:
+
+  if stack_version_override is None:
+    stack_version = get_stack_version(component_select_name)
+  else:
     stack_version = stack_version_override
 
   component_conf_dir = conf_dict
@@ -187,7 +189,7 @@
 
     setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
               xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
-              component_user, component_group, java_home)
+              component_user, component_group, java_home, cred_lib_path_override, cred_setup_prefix_override)
 
   else:
     File(format('{component_conf_dir}/ranger-security.xml'),
@@ -207,16 +209,20 @@
       sudo=True)
 
 def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
-                                ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
+                                ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home, cred_lib_path_override = None, cred_setup_prefix_override = None):
 
   stack_root = Script.get_stack_root()
   service_name = str(service_name).lower()
-  cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
-  cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
 
-  if service_name == 'nifi':
-    cred_lib_path = format('{stack_root}/{stack_version}/{service_name}/ext/ranger/install/lib/*')
-    cred_setup_prefix = (format('{stack_root}/{stack_version}/{service_name}/ext/ranger/scripts/ranger_credential_helper.py'), '-l', cred_lib_path)
+  if cred_lib_path_override is not None:
+    cred_lib_path = cred_lib_path_override
+  else:
+    cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
+
+  if cred_setup_prefix_override is not None:
+    cred_setup_prefix = cred_setup_prefix_override
+  else:
+    cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
 
   if audit_db_is_enabled:
     cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
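Note: the new cred_lib_path_override / cred_setup_prefix_override parameters follow an explicit-override-wins pattern. A sketch (paths are illustrative):

```python
# Sketch of the explicit-override-wins pattern used for the new
# cred_lib_path_override parameter; paths are illustrative.
def resolve_cred_lib_path(stack_root, stack_version, service_name, override=None):
    if override is not None:
        return override
    return "{0}/{1}/ranger-{2}-plugin/install/lib/*".format(
        stack_root, stack_version, service_name)

print(resolve_cred_lib_path("/usr/hdp", "2.5.0.0-1245", "hive"))
print(resolve_cred_lib_path("/usr/hdp", "2.5.0.0-1245", "nifi",
                            override="/usr/hdf/current/nifi/ext/ranger/install/lib/*"))
```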
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index ff00a1f..79dc874 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -78,6 +78,46 @@
 
   return version
 
+def get_component_version_with_stack_selector(stack_selector_path, component_name):
+  """
+   For specific cases where we deal with HDP add-on services from a management pack, the version
+   needs to be determined by using the specific stack selector itself.
+   :param stack_selector_path: path to the stack selector binary, e.g., /usr/bin/hdf-select;
+   provided by the service which calls this function.
+   :param component_name: component name as a string, required to look up the version
+   :return: a version string if found, e.g., 2.2.1.0-2175; otherwise None
+   This function can be called by custom services, hence it should not be removed.
+  """
+  version = None
+  out = None
+  code = -1
+  if not stack_selector_path:
+    Logger.error("Stack selector path not provided")
+  elif not os.path.exists(stack_selector_path):
+    Logger.error("Stack selector path does not exist")
+  elif not component_name:
+    Logger.error("Component name not provided")
+  else:
+    tmpfile = tempfile.NamedTemporaryFile()
+
+    get_stack_comp_version_cmd = ""
+    try:
+      # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
+      with open(tmpfile.name, 'r') as out_file:
+        get_stack_comp_version_cmd = '{0} status {1} > {2}'.format(stack_selector_path, component_name, tmpfile.name)
+        code, stdoutdata = shell.call(get_stack_comp_version_cmd, quiet=True)
+        out = out_file.read()
+
+      if code != 0 or out is None:
+        raise Exception("Code is nonzero or output is empty")
+
+      Logger.debug("Command: %s\nOutput: %s" % (get_stack_comp_version_cmd, str(out)))
+      matches = re.findall(r"([\d\.]+\-\d+)", out)
+      version = matches[0] if matches else None
+    except Exception, e:
+      Logger.error("Could not determine stack version for component %s by calling '%s'. Return Code: %s, Output: %s." %
+                   (component_name, get_stack_comp_version_cmd, str(code), str(out)))
+  return version
 
 def get_versions_from_stack_root(stack_root):
   """
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 5fa9ec4..bcad6c3 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -28,6 +28,7 @@
 import platform
 import inspect
 import tarfile
+import time
 from optparse import OptionParser
 import resource_management
 from ambari_commons import OSCheck, OSConst
@@ -35,6 +36,7 @@
 from ambari_commons.constants import UPGRADE_TYPE_ROLLING
 from ambari_commons.constants import UPGRADE_TYPE_HOST_ORDERED
 from ambari_commons.network import reconfigure_urllib2_opener
+from ambari_commons.inet_utils import resolve_address, ensure_ssl_using_protocol
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.libraries.resources import XmlConfig
 from resource_management.libraries.resources import PropertiesFile
@@ -150,6 +152,7 @@
   # Class variable
   tmp_dir = ""
   force_https_protocol = "PROTOCOL_TLSv1"
+  ca_cert_file_path = None
 
   def load_structured_out(self):
     Script.structuredOut = {}
@@ -179,7 +182,7 @@
         json.dump(Script.structuredOut, fp)
     except IOError, err:
       Script.structuredOut.update({"errMsg" : "Unable to write to " + self.stroutfile})
-      
+
   def get_component_name(self):
     """
     To be overridden by subclasses.
@@ -217,7 +220,7 @@
     """
     stack_name = Script.get_stack_name()
     component_name = self.get_component_name()
-    
+
     if component_name and stack_name:
       component_version = get_component_version(stack_name, component_name)
 
@@ -260,15 +263,15 @@
     parser.add_option("-o", "--out-files-logging", dest="log_out_files", action="store_true",
                       help="use this option to enable outputting *.out files of the service pre-start")
     (self.options, args) = parser.parse_args()
-    
+
     self.log_out_files = self.options.log_out_files
-    
+
     # parse arguments
     if len(args) < 6:
      print "Script expects at least 6 arguments"
      print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
      sys.exit(1)
-     
+
     self.command_name = str.lower(sys.argv[1])
     self.command_data_file = sys.argv[2]
     self.basedir = sys.argv[3]
@@ -276,9 +279,11 @@
     self.load_structured_out()
     self.logging_level = sys.argv[5]
     Script.tmp_dir = sys.argv[6]
-    # optional script argument for forcing https protocol
+    # optional script arguments for forcing https protocol and ca_certs file
     if len(sys.argv) >= 8:
       Script.force_https_protocol = sys.argv[7]
+    if len(sys.argv) >= 9:
+      Script.ca_cert_file_path = sys.argv[8]
 
     logging_level_str = logging._levelNames[self.logging_level]
     Logger.initialize_logger(__name__, logging_level=logging_level_str)
@@ -289,6 +294,16 @@
     if OSCheck.is_windows_family():
       reload_windows_env()
 
+    # !!! status commands re-use structured output files; if the status command doesn't update
+    # the file (because it doesn't have to) then we must ensure that the file is reset to prevent
+    # old, stale structured output from a prior status command from being used
+    if self.command_name == "status":
+      Script.structuredOut = {}
+      self.put_structured_out({})
+
+    # make sure that script has forced https protocol and ca_certs file passed from agent
+    ensure_ssl_using_protocol(Script.get_force_https_protocol_name(), Script.get_ca_cert_file_path())
+
     try:
       with open(self.command_data_file) as f:
         pass
@@ -308,50 +323,68 @@
       method = self.choose_method_to_execute(self.command_name)
       with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
         env.config.download_path = Script.tmp_dir
-        
-        if self.command_name == "start" and not self.is_hook():
-          self.pre_start()
-        
+
+        if not self.is_hook():
+          self.execute_prefix_function(self.command_name, 'pre', env)
+
         method(env)
 
-        if self.command_name == "start" and not self.is_hook():
-          self.post_start()
+        if not self.is_hook():
+          self.execute_prefix_function(self.command_name, 'post', env)
+
     except Fail as ex:
       ex.pre_raise()
       raise
     finally:
       if self.should_expose_component_version(self.command_name):
         self.save_component_version_to_structured_out()
-        
+
+  def execute_prefix_function(self, command_name, afix, env):
+    """
+    Execute a pre/post action for the given command, based on command_name and the afix type.
+    Example: command_name=start, afix=pre results in execution of self.pre_start(env) if it exists.
+    """
+    self_methods = dir(self)
+    method_name = "{0}_{1}".format(afix, command_name)
+    if method_name not in self_methods:
+      Logger.logger.debug("Action afix '{0}' not present".format(method_name))
+      return
+    Logger.logger.debug("Execute action afix: {0}".format(method_name))
+    method = getattr(self, method_name)
+    method(env)
+
   def is_hook(self):
     from resource_management.libraries.script.hook import Hook
     return (Hook in self.__class__.__bases__)
-        
+
   def get_log_folder(self):
     return ""
-  
+
   def get_user(self):
     return ""
 
   def get_pid_files(self):
     return []
-        
-  def pre_start(self):
+
+  def pre_start(self, env=None):
+    """
+    Executed before any start method. Posts the contents of relevant *.out files to the command execution log.
+    """
     if self.log_out_files:
       log_folder = self.get_log_folder()
       user = self.get_user()
-      
+
       if log_folder == "":
         Logger.logger.warn("Log folder for current script is not defined")
         return
-      
+
       if user == "":
         Logger.logger.warn("User for current script is not defined")
         return
-      
+
       show_logs(log_folder, user, lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED, mask=OUT_FILES_MASK)
 
-  def post_start(self):
+  def post_start(self, env=None):
     pid_files = self.get_pid_files()
     if pid_files == []:
       Logger.logger.warning("Pid files for current script are not defined")
@@ -366,6 +399,32 @@
 
     Logger.info("Component has started with pid(s): {0}".format(', '.join(pids)))
 
+  def post_stop(self, env):
+    """
+    Executed after completion of every stop method. Waits until the component is actually stopped
+    (the check is performed using the component's status() method).
+    """
+    self_methods = dir(self)
+
+    if 'status' not in self_methods:
+      return
+    status_method = getattr(self, 'status')
+    component_is_stopped = False
+    counter = 0
+    while not component_is_stopped:
+      try:
+        if counter % 100 == 0:
+          Logger.logger.info("Waiting for actual component stop")
+        status_method(env)
+        time.sleep(0.1)
+        counter += 1
+      except ComponentIsNotRunning, e:
+        Logger.logger.debug("'status' reports ComponentIsNotRunning")
+        component_is_stopped = True
+      except ClientComponentHasNoStatus, e:
+        Logger.logger.debug("Client component has no status")
+        component_is_stopped = True
+
   def choose_method_to_execute(self, command_name):
     """
     Returns a callable object that should be executed for a given command.
@@ -375,7 +434,7 @@
       raise Fail("Script '{0}' has no method '{1}'".format(sys.argv[0], command_name))
     method = getattr(self, command_name)
     return method
-  
+
   def get_stack_version_before_packages_installed(self):
     """
     This works in a lazy way (calculates the version first time and stores it). 
@@ -392,7 +451,7 @@
     if not Script.stack_version_from_distro_select and component_name:
       from resource_management.libraries.functions import stack_select
       Script.stack_version_from_distro_select = stack_select.get_stack_version_before_install(component_name)
-      
+
     # If <stack-selector-tool> has not yet been done (situations like first install),
     # we can use <stack-selector-tool> version itself.
     # Wildcards cause a lot of troubles with installing packages, if the version contains wildcards we should try to specify it.
@@ -403,7 +462,7 @@
               stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
 
     return Script.stack_version_from_distro_select
-  
+
   def format_package_name(self, name):
     from resource_management.libraries.functions.default import default
     """
@@ -440,7 +499,7 @@
       stack_version_package_formatted = self.get_stack_version_before_packages_installed().replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name
 
     package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
-    
+
     return package_name
 
   @staticmethod
@@ -465,10 +524,34 @@
     return Script.tmp_dir
 
   @staticmethod
-  def get_force_https_protocol():
+  def get_force_https_protocol_name():
+    """
+    Get forced https protocol name.
+
+    :return: protocol name, PROTOCOL_TLSv1 by default
+    """
     return Script.force_https_protocol
 
   @staticmethod
+  def get_force_https_protocol_value():
+    """
+    Get the forced https protocol value that corresponds to the ssl module constant.
+
+    :return: protocol value
+    """
+    import ssl
+    return getattr(ssl, Script.get_force_https_protocol_name())
+
+  @staticmethod
+  def get_ca_cert_file_path():
+    """
+    Get path to file with trusted certificates.
+
+    :return: trusted certificates file path
+    """
+    return Script.ca_cert_file_path
+
+  @staticmethod
   def get_component_from_role(role_directory_map, default_role):
     """
     Gets the <stack-root>/current/<component> component given an Ambari role,
@@ -639,13 +722,13 @@
                           hadoop_user, self.get_password(hadoop_user),
                           str(config['hostLevelParams']['stack_version']))
       reload_windows_env()
-      
+
   def check_package_condition(self, package):
     condition = package['condition']
-    
+
     if not condition:
       return True
-    
+
     return self.should_install_package(package)
 
   def should_install_package(self, package):
@@ -782,7 +865,7 @@
 
       # To remain backward compatible with older stacks, only pass upgrade_type if available.
       # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
-      self.pre_start()
+      self.pre_start(env)
       if "upgrade_type" in inspect.getargspec(self.start).args:
         self.start(env, upgrade_type=upgrade_type)
       else:
@@ -790,7 +873,7 @@
           self.start(env, rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
         else:
           self.start(env)
-      self.post_start()
+      self.post_start(env)
 
       if is_stack_upgrade:
         # Remain backward compatible with the rest of the services that haven't switched to using
@@ -819,22 +902,6 @@
     """
     self.fail_with_error('configure method isn\'t implemented')
 
-  def security_status(self, env):
-    """
-    To be overridden by subclasses to provide the current security state of the component.
-    Implementations are required to set the "securityState" property of the structured out data set
-    to one of the following values:
-
-      UNSECURED        - If the component is not configured for any security protocol such as
-                         Kerberos
-      SECURED_KERBEROS - If the component is configured for Kerberos
-      UNKNOWN          - If the security state cannot be determined
-      ERROR            - If the component is supposed to be secured, but there are issues with the
-                         configuration.  For example, if the component is configured for Kerberos
-                         but the configured principal and keytab file fail to kinit
-    """
-    self.put_structured_out({"securityState": "UNKNOWN"})
-
   def generate_configs_get_template_file_content(self, filename, dicts):
     config = self.get_config()
     content = ''
@@ -852,7 +919,7 @@
     config = self.get_config()
     return {'configurations':config['configurations'][dict],
             'configuration_attributes':config['configuration_attributes'][dict]}
-    
+
   def generate_configs_get_xml_file_dict(self, filename, dict):
     config = self.get_config()
     return config['configurations'][dict]
@@ -864,7 +931,7 @@
     """
     import params
     env.set_params(params)
-    
+
     config = self.get_config()
 
     xml_configs_list = config['commandParams']['xml_configs_list']
@@ -882,19 +949,19 @@
         for filename, dict in file_dict.iteritems():
           XmlConfig(filename,
                     conf_dir=conf_tmp_dir,
-                    mode=0600,
+                    mode=0644,
                     **self.generate_configs_get_xml_file_content(filename, dict)
           )
       for file_dict in env_configs_list:
         for filename,dicts in file_dict.iteritems():
           File(os.path.join(conf_tmp_dir, filename),
-               mode=0600,
+               mode=0644,
                content=InlineTemplate(self.generate_configs_get_template_file_content(filename, dicts)))
 
       for file_dict in properties_configs_list:
         for filename, dict in file_dict.iteritems():
           PropertiesFile(os.path.join(conf_tmp_dir, filename),
-                         mode=0600,
+                         mode=0644,
                          properties=self.generate_configs_get_xml_file_dict(filename, dict)
           )
       with closing(tarfile.open(output_filename, "w:gz")) as tar:
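Note: execute_prefix_function replaces the hard-coded pre_start/post_start calls with name-based dispatch: any `pre_<command>` / `post_<command>` method found on the instance is invoked. A self-contained sketch:

```python
# Self-contained sketch of the name-based pre/post dispatch added above: any
# pre_<command> / post_<command> method found on the instance is invoked.
class ScriptSketch(object):
    def pre_start(self, env):
        print("before start")

    def start(self, env):
        print("starting")

    def execute_prefix_function(self, command_name, afix, env):
        method_name = "{0}_{1}".format(afix, command_name)
        if method_name not in dir(self):
            return  # no hook defined for this command/afix pair
        getattr(self, method_name)(env)

s = ScriptSketch()
s.execute_prefix_function("start", "pre", env=None)   # prints "before start"
s.execute_prefix_function("start", "post", env=None)  # silently skipped
```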
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py b/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
index 4716343..cf1d30e 100644
--- a/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
+++ b/ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
@@ -31,6 +31,11 @@
   elif action == 'status':
     cmd = format('service {daemon_name} status')
     logoutput = False
+    try:
+      Execute(cmd)
+      return
+    except:
+      raise ComponentIsNotRunning()
   else:
     cmd = None
 
diff --git a/ambari-infra/.gitignore b/ambari-infra/.gitignore
new file mode 100644
index 0000000..a7d91c4
--- /dev/null
+++ b/ambari-infra/.gitignore
@@ -0,0 +1,6 @@
+target
+.settings
+.classpath
+.project
+/bin/
+job-repository.db
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-assembly/pom.xml b/ambari-infra/ambari-infra-assembly/pom.xml
index 51e5804..fafef7e 100644
--- a/ambari-infra/ambari-infra-assembly/pom.xml
+++ b/ambari-infra/ambari-infra-assembly/pom.xml
@@ -38,6 +38,10 @@
     <solr.client.mapping.path>${mapping.base.path}/${solr.client.package.name}</solr.client.mapping.path>
     <solr.client.dir>${project.basedir}/../ambari-infra-solr-client</solr.client.dir>
     <infra.solr.plugin.dir>${project.basedir}/../ambari-infra-solr-plugin</infra.solr.plugin.dir>
+    <infra-manager.package.name>ambari-infra-manager</infra-manager.package.name>
+    <infra-manager.dir>${project.basedir}/../ambari-infra-manager</infra-manager.dir>
+    <infra-manager.mapping.path>${mapping.base.path}/${infra-manager.package.name}</infra-manager.mapping.path>
+    <infra-manager.conf.mapping.path>/etc/${infra-manager.package.name}/conf</infra-manager.conf.mapping.path>
   </properties>
 
   <profiles>
@@ -118,6 +122,45 @@
                   </mappings>
                 </configuration>
               </execution>
+              <execution>
+                <id>infra-manager</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>rpm</goal>
+                </goals>
+                <configuration>
+                  <group>Development</group>
+                  <name>${infra-manager.package.name}</name>
+                  <mappings>
+                    <mapping>
+                      <directory>${infra-manager.mapping.path}</directory>
+                      <sources>
+                        <source>
+                          <location>${infra-manager.dir}/target/package</location>
+                          <excludes>
+                            <exclude>log4j.xml</exclude>
+                            <exclude>infra-manager.properties</exclude>
+                            <exclude>infra-manager-env.sh</exclude>
+                          </excludes>
+                        </source>
+                      </sources>
+                    </mapping>
+                    <mapping>
+                      <directory>${infra-manager.conf.mapping.path}</directory>
+                      <sources>
+                        <source>
+                          <location>${infra-manager.dir}/target/package</location>
+                          <includes>
+                            <include>log4j.xml</include>
+                            <include>infra-manager.properties</include>
+                            <include>infra-manager-env.sh</include>
+                          </includes>
+                        </source>
+                      </sources>
+                    </mapping>
+                  </mappings>
+                </configuration>
+              </execution>
             </executions>
           </plugin>
           <plugin>
@@ -277,6 +320,49 @@
                   </dataSet>
                 </configuration>
               </execution>
+
+              <execution>
+                <phase>package</phase>
+                <id>jdeb-infra-manager</id>
+                <goals>
+                  <goal>jdeb</goal>
+                </goals>
+                <configuration>
+                  <controlDir>${basedir}/src/main/package/deb/manager</controlDir>
+                  <deb>${basedir}/target/${infra-manager.package.name}_${package-version}-${package-release}.deb</deb>
+                  <skip>false</skip>
+                  <skipPOMs>false</skipPOMs>
+                  <dataSet>
+                    <data>
+                      <src>${infra-manager.dir}/target/ambari-infra-manager.tar.gz</src>
+                      <type>archive</type>
+                      <mapper>
+                        <type>perm</type>
+                        <user>root</user>
+                        <group>root</group>
+                        <prefix>${infra-manager.mapping.path}</prefix>
+                      </mapper>
+                      <excludes>
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
+                      </excludes>
+                    </data>
+                    <data>
+                      <src>${infra-manager.dir}/target/package</src>
+                      <type>directory</type>
+                      <mapper>
+                        <prefix>${infra-manager.conf.mapping.path}</prefix>
+                        <type>perm</type>
+                        <user>root</user>
+                        <group>root</group>
+                        <filemode>644</filemode>
+                      </mapper>
+                      <includes>
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
+                      </includes>
+                    </data>
+                  </dataSet>
+                </configuration>
+              </execution>
             </executions>
           </plugin>
           <plugin>
@@ -330,6 +416,11 @@
       <artifactId>ambari-infra-solr-plugin</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-infra-manager</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
 
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control
new file mode 100644
index 0000000..03663a0
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+Package: [[infra-manager.package.name]]
+Version: [[package-version]]-[[package-release]]
+Section: [[deb.section]]
+Priority: [[deb.priority]]
+Depends: [[deb.dependency.list]]
+Architecture: [[deb.architecture]]
+Description: [[description]]
+Maintainer: [[deb.publisher]]
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
diff --git a/ambari-infra/ambari-infra-manager/README.md b/ambari-infra/ambari-infra-manager/README.md
new file mode 100644
index 0000000..d3527c4
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/README.md
@@ -0,0 +1,31 @@
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+# Ambari Infra Manager
+TODO
+## Build & Run Application
+```bash
+mvn clean package exec:java
+```
+
+## Build & Run Application in docker container
+```bash
+cd docker
+./infra-manager-docker.sh
+```
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/build.xml b/ambari-infra/ambari-infra-manager/build.xml
new file mode 100644
index 0000000..3d0f4da
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/build.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project basedir="." default="build" name="infra-manager">
+  <property environment="env"/>
+  <property name="debuglevel" value="source,lines,vars"/>
+  <dirname property="builddir" file="build.xml"/>
+  <property name="target" value="1.7"/>
+  <property name="source" value="1.7"/>
+  <target name="init">
+  </target>
+  <target name="build"/>
+
+  <target name="package">
+    <delete dir="target/package"/>
+    <copy todir="target/package/libs" includeEmptyDirs="no">
+      <fileset dir="target/libs"/>
+    </copy>
+    <copy todir="target/package/libs" includeEmptyDirs="no">
+      <fileset file="target/*.jar"/>
+    </copy>
+    <copy todir="target/package" includeEmptyDirs="no">
+      <fileset file="src/main/resources/infraManager.sh"/>
+      <fileset file="src/main/resources/infra-manager-env.sh"/>
+      <fileset file="target/classes/infra-manager.properties"/>
+      <fileset file="target/classes/log4j.xml"/>
+    </copy>
+    <chmod file="target/package/*.sh" perm="755"/>
+    <tar compression="gzip" destfile="target/ambari-infra-manager.tar.gz">
+      <tarfileset mode="755" dir="target/package">
+        <include name="*.sh"/>
+      </tarfileset>
+      <tarfileset mode="664" dir="target/package">
+        <exclude name="*.sh"/>
+      </tarfileset>
+    </tar>
+
+  </target>
+
+</project>
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/docker/Dockerfile b/ambari-infra/ambari-infra-manager/docker/Dockerfile
new file mode 100644
index 0000000..adb584a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/Dockerfile
@@ -0,0 +1,52 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+RUN yum clean all -y && yum update -y
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git httpd lsof
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+ENV JAVA_VERSION 8u31
+ENV BUILD_VERSION b13
+RUN wget --no-cookies --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/$JAVA_VERSION-$BUILD_VERSION/jdk-$JAVA_VERSION-linux-x64.rpm" -O jdk-8-linux-x64.rpm
+RUN rpm -ivh jdk-8-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget http://archive.apache.org/dist/maven/maven-3/3.3.1/binaries/apache-maven-3.3.1-bin.tar.gz
+RUN tar -xvzf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+RUN rm -rf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+
+ENV M2_HOME /opt/maven/apache-maven-3.3.1
+ENV MAVEN_OPTS -Xmx2048m
+ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
+
+# SSH key
+RUN ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''
+RUN cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
+RUN chmod 600 /root/.ssh/authorized_keys
+RUN sed -ri 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
+
+ADD bin/start.sh /root/start.sh
+RUN chmod +x /root/start.sh
+
+WORKDIR /root
+CMD /root/start.sh
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/docker/bin/start.sh b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
new file mode 100755
index 0000000..076c06f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
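+# Remote JVM debugging is enabled on port 5007, which infra-manager-docker.sh publishes via -p 5007:5007.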
+export INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+touch /root/infra-manager.log
+/root/ambari-infra-manager/infraManager.sh --port 61890 > /root/infra-manager.log &
+tail -f /root/infra-manager.log
+
diff --git a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
new file mode 100755
index 0000000..87d6b8a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+sdir="`dirname \"$0\"`"
+: ${1:?"argument is missing: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"}
+command="$1"
+
+function build_infra_manager_container() {
+  pushd $sdir
+  docker build -t ambari-infra-manager:v1.0 .
+  popd
+}
+
+function build_infra_manager_project() {
+  pushd $sdir/../
+  mvn clean package -DskipTests
+  popd
+}
+
+function kill_infra_manager_container() {
+  echo "Try to remove infra manager container if exists ..."
+  docker rm -f infra-manager
+}
+
+function start_infra_manager_container() {
+  echo "Start infra manager container ..."
+  pushd $sdir/../
+  local AMBARI_INFRA_MANAGER_LOCATION=$(pwd)
+  popd
+  kill_infra_manager_container
+  docker run -d --name infra-manager --hostname infra-manager.apache.org \
+    -v $AMBARI_INFRA_MANAGER_LOCATION/target/package:/root/ambari-infra-manager -p 61890:61890 -p 5007:5007 \
+    ambari-infra-manager:v1.0
+  ip_address=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' infra-manager)
+  echo "Ambari Infra Manager container started on $ip_address (for Mac OSX route to boot2docker/docker-machine VM address, e.g.: 'sudo route add -net 172.17.0.0/16 192.168.59.103')"
+  echo "You can follow the Ambari Infra Manager logs with the 'docker logs -f infra-manager' command"
+}
+
+case $command in
+  "build-and-run")
+     build_infra_manager_project
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build")
+     build_infra_manager_project
+     start_infra_manager_container
+     ;;
+  "build-docker-and-run")
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build-mvn-and-run")
+     build_infra_manager_project
+     build_infra_manager_container
+     ;;
+  "build-docker-only")
+     build_infra_manager_container
+     ;;
+  "build-mvn-only")
+     build_infra_manager_project
+     ;;
+  "start")
+     start_infra_manager_container
+     ;;
+  "stop")
+     kill_infra_manager_container
+     ;;
+  *)
+     echo "Available commands: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"
+     ;;
+esac
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/pom.xml b/ambari-infra/ambari-infra-manager/pom.xml
new file mode 100644
index 0000000..b7708c2
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/pom.xml
@@ -0,0 +1,431 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <artifactId>ambari-infra</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>ambari-infra-manager</artifactId>
+  <name>Ambari Infra Manager</name>
+  <url>http://maven.apache.org</url>
+
+  <properties>
+    <spring.version>4.2.5.RELEASE</spring.version>
+    <spring.security.version>4.0.4.RELEASE</spring.security.version>
+    <jersey.version>2.23.2</jersey.version>
+    <jetty-version>9.2.11.v20150529</jetty-version>
+    <swagger.version>1.5.8</swagger.version>
+    <spring-data-solr.version>2.0.2.RELEASE</spring-data-solr.version>
+    <jjwt.version>0.6.0</jjwt.version>
+    <spring-batch.version>3.0.7.RELEASE</spring-batch.version>
+    <jdk.version>1.7</jdk.version>
+    <sqlite.version>3.8.11.2</sqlite.version>
+  </properties>
+
+  <build>
+    <finalName>ambari-infra-manager_${project.version}</finalName>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+        <configuration>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>java</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <mainClass>org.apache.ambari.infra.InfraManager</mainClass>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputAbsoluteArtifactFilename>true</outputAbsoluteArtifactFilename>
+              <outputDirectory>${basedir}/target/libs</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>1.7</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <configuration>
+              <target>
+                <ant antfile="build.xml">
+                  <target name="package"/>
+                </ant>
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.easymock</groupId>
+      <artifactId>easymock</artifactId>
+      <version>3.4</version>
+      <scope>test</scope>
+    </dependency>
+    <!-- Spring dependencies -->
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-beans</artifactId>
+      <version>${spring.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-context</artifactId>
+      <version>${spring.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-test</artifactId>
+      <version>${spring.version}</version>
+    </dependency>
+    <!-- Spring Security -->
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-web</artifactId>
+      <version>${spring.security.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-core</artifactId>
+      <version>${spring.security.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-config</artifactId>
+      <version>${spring.security.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security</groupId>
+      <artifactId>spring-security-ldap</artifactId>
+      <version>${spring.security.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.ext</groupId>
+      <artifactId>jersey-spring3</artifactId>
+      <version>${jersey.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.springframework</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.connectors</groupId>
+      <artifactId>jersey-apache-connector</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.core</groupId>
+      <artifactId>jersey-client</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.media</groupId>
+      <artifactId>jersey-media-json-jettison</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.media</groupId>
+      <artifactId>jersey-media-json-jackson</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.core</groupId>
+      <artifactId>jersey-common</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+      <version>3.1.0</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.17</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.solr</groupId>
+      <artifactId>solr-solrj</artifactId>
+      <version>${solr.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.solr</groupId>
+      <artifactId>solr-core</artifactId>
+      <version>${solr.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>*</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>${solr.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analyzers-common</artifactId>
+      <version>${solr.version}</version>
+    </dependency>
+    <!-- Hadoop -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>2.7.0</version>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <version>2.4</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <version>1.3.1</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security.kerberos</groupId>
+      <artifactId>spring-security-kerberos-core</artifactId>
+      <version>1.0.1.RELEASE</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security.kerberos</groupId>
+      <artifactId>spring-security-kerberos-web</artifactId>
+      <version>1.0.1.RELEASE</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.security.kerberos</groupId>
+      <artifactId>spring-security-kerberos-client</artifactId>
+      <version>1.0.1.RELEASE</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-security</artifactId>
+      <version>${jetty-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
+      <version>${jetty-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlet</artifactId>
+      <version>${jetty-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlets</artifactId>
+      <version>${jetty-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+      <version>${jetty-version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
+      <version>${jetty-version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.springframework</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-annotations</artifactId>
+      <version>${jetty-version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.springframework</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>cglib</groupId>
+      <artifactId>cglib</artifactId>
+      <version>3.2.4</version>
+    </dependency>
+    <dependency>
+      <groupId>io.swagger</groupId>
+      <artifactId>swagger-annotations</artifactId>
+      <version>${swagger.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.swagger</groupId>
+      <artifactId>swagger-core</artifactId>
+      <version>${swagger.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.swagger</groupId>
+      <artifactId>swagger-jersey2-jaxrs</artifactId>
+      <version>${swagger.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.ws.rs</groupId>
+          <artifactId>jsr311-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>io.swagger</groupId>
+      <artifactId>swagger-models</artifactId>
+      <version>${swagger.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.webjars</groupId>
+      <artifactId>swagger-ui</artifactId>
+      <version>2.1.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.data</groupId>
+      <artifactId>spring-data-solr</artifactId>
+      <version>${spring-data-solr.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-context-support</artifactId>
+      <version>${spring.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.batch</groupId>
+      <artifactId>spring-batch-core</artifactId>
+      <version>${spring-batch.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework</groupId>
+      <artifactId>spring-jdbc</artifactId>
+      <version>${spring.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.jsonwebtoken</groupId>
+      <artifactId>jjwt</artifactId>
+      <version>${jjwt.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.ext</groupId>
+      <artifactId>jersey-bean-validation</artifactId>
+      <version>${jersey.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.xerial</groupId>
+      <artifactId>sqlite-jdbc</artifactId>
+      <version>${sqlite.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.springframework.batch</groupId>
+      <artifactId>spring-batch-admin-manager</artifactId>
+      <version>1.3.1.RELEASE</version>
+    </dependency>
+  </dependencies>
+
+</project>
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
new file mode 100644
index 0000000..227bab4
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra;
+
+import org.apache.ambari.infra.conf.InfraManagerConfig;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.HttpConfiguration;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.handler.HandlerList;
+import org.eclipse.jetty.server.handler.ResourceHandler;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.resource.Resource;
+import org.eclipse.jetty.util.resource.ResourceCollection;
+import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.web.context.ContextLoaderListener;
+import org.springframework.web.context.request.RequestContextListener;
+import org.springframework.web.context.support.AnnotationConfigWebApplicationContext;
+
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+import static org.apache.ambari.infra.common.InfraManagerConstants.DEFAULT_PORT;
+import static org.apache.ambari.infra.common.InfraManagerConstants.DEFAULT_PROTOCOL;
+import static org.apache.ambari.infra.common.InfraManagerConstants.INFRA_MANAGER_SESSION_ID;
+import static org.apache.ambari.infra.common.InfraManagerConstants.PROTOCOL_SSL;
+import static org.apache.ambari.infra.common.InfraManagerConstants.ROOT_CONTEXT;
+import static org.apache.ambari.infra.common.InfraManagerConstants.SESSION_TIMEOUT;
+import static org.apache.ambari.infra.common.InfraManagerConstants.WEB_RESOURCE_FOLDER;
+
+public class InfraManager {
+
+  private static final Logger LOG = LoggerFactory.getLogger(InfraManager.class);
+
+  public static void main(String[] args) {
+    Options options = new Options();
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.setDescPadding(10);
+    helpFormatter.setWidth(200);
+
+    final Option helpOption = Option.builder("h")
+      .longOpt("help")
+      .desc("Print commands")
+      .build();
+
+    final Option portOption = Option.builder("p")
+      .longOpt("port")
+      .desc("Infra Manager port")
+      .numberOfArgs(1)
+      .argName("port_number")
+      .build();
+
+    final Option protocolOption = Option.builder("t")
+      .longOpt("tls-enabled")
+      .desc("TLS enabled for Infra Manager")
+      .build();
+
+    options.addOption(helpOption);
+    options.addOption(portOption);
+    options.addOption(protocolOption);
+
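+    // Example invocation: infraManager.sh --port 61890 (see docker/bin/start.sh);
+    // without options the server defaults to port 61890 over http.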
+    try {
+      CommandLineParser cmdLineParser = new DefaultParser();
+      CommandLine cli = cmdLineParser.parse(options, args);
+      int port = cli.hasOption('p') ? Integer.parseInt(cli.getOptionValue('p')) : DEFAULT_PORT;
+      String protocol = cli.hasOption("t") ? PROTOCOL_SSL : DEFAULT_PROTOCOL;
+
+      Server server = buildServer(port, protocol);
+      HandlerList handlers = new HandlerList();
+      handlers.addHandler(createSwaggerContext());
+      handlers.addHandler(createBaseWebappContext());
+
+      server.setHandler(handlers);
+      server.start();
+
+      LOG.debug("============================Server Dump=======================================");
+      LOG.debug(server.dump());
+      LOG.debug("==============================================================================");
+      server.join();
+    } catch (Exception e) {
+      LOG.error("Error while starting Infra Manager", e);
+    }
+  }
+
+  private static Server buildServer(int port, String protocol) {
+    Server server = new Server();
+    HttpConfiguration httpConfiguration = new HttpConfiguration();
+    httpConfiguration.setRequestHeaderSize(65535);
+    // TODO: tls
+    ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory(httpConfiguration));
+    connector.setPort(port);
+    server.setConnectors(new Connector[]{connector});
+    URI infraManagerURI = URI.create(String.format("%s://0.0.0.0:%d", protocol, port));
+    LOG.info("Starting infra manager URI=" + infraManagerURI);
+    return server;
+  }
+
+  private static WebAppContext createBaseWebappContext() throws MalformedURLException {
+    URI webResourceBase = findWebResourceBase();
+    WebAppContext context = new WebAppContext();
+    context.setBaseResource(Resource.newResource(webResourceBase));
+    context.setContextPath(ROOT_CONTEXT);
+    context.setParentLoaderPriority(true);
+
+    // Configure Spring
+    context.addEventListener(new ContextLoaderListener());
+    context.addEventListener(new RequestContextListener());
+    // TODO: security, add: context.addFilter(new FilterHolder(new DelegatingFilterProxy("springSecurityFilterChain")), "/*", EnumSet.allOf(DispatcherType.class));
+    context.setInitParameter("contextClass", AnnotationConfigWebApplicationContext.class.getName());
+    context.setInitParameter("contextConfigLocation", InfraManagerConfig.class.getName());
+
+    // Configure Jersey
+    ServletHolder jerseyServlet = context.addServlet(org.glassfish.jersey.servlet.ServletContainer.class, "/api/v1/*");
+    jerseyServlet.setInitOrder(1);
+    jerseyServlet.setInitParameter("jersey.config.server.provider.packages","org.apache.ambari.infra.rest,io.swagger.jaxrs.listing");
+
+    context.getSessionHandler().getSessionManager().setMaxInactiveInterval(SESSION_TIMEOUT);
+    context.getSessionHandler().getSessionManager().getSessionCookieConfig().setName(INFRA_MANAGER_SESSION_ID);
+
+    return context;
+  }
+
+  private static URI findWebResourceBase() {
+    URL fileCompleteUrl = Thread.currentThread().getContextClassLoader().getResource(WEB_RESOURCE_FOLDER);
+    String errorMessage = "Web Resource Folder " + WEB_RESOURCE_FOLDER + " not found in classpath";
+    if (fileCompleteUrl != null) {
+      try {
+        return fileCompleteUrl.toURI().normalize();
+      } catch (URISyntaxException e) {
+        LOG.error(errorMessage, e);
+        System.exit(1);
+      }
+    } else {
+      LOG.error(errorMessage);
+      System.exit(1);
+    }
+    throw new IllegalStateException(errorMessage);
+  }
+
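+  // Merges the swagger-ui webjar resources with our own "swagger" resources and
+  // serves the API docs under /docs/ (welcome file: swagger.html).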
+  private static ServletContextHandler createSwaggerContext() throws URISyntaxException {
+    ResourceHandler resourceHandler = new ResourceHandler();
+    ResourceCollection resources = new ResourceCollection(new String[] {
+      InfraManager.class.getClassLoader()
+        .getResource("META-INF/resources/webjars/swagger-ui/2.1.0")
+        .toURI().toString(),
+      InfraManager.class.getClassLoader()
+        .getResource("swagger")
+        .toURI().toString()
+    });
+    resourceHandler.setBaseResource(resources);
+    resourceHandler.setWelcomeFiles(new String[]{"swagger.html"}); // rewrite index.html from swagger-ui webjar
+    ServletContextHandler context = new ServletContextHandler();
+    context.setContextPath("/docs/");
+    context.setHandler(resourceHandler);
+    return context;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/common/InfraManagerConstants.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/common/InfraManagerConstants.java
new file mode 100644
index 0000000..11714f3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/common/InfraManagerConstants.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.common;
+
+public final class InfraManagerConstants {
+  public static final int DEFAULT_PORT = 61890;
+  public static final String DEFAULT_PROTOCOL = "http";
+  public static final String INFRA_MANAGER_SESSION_ID = "INFRA_MANAGER_SESSIONID";
+  public static final String PROTOCOL_SSL = "https";
+  public static final String ROOT_CONTEXT = "/";
+  public static final String WEB_RESOURCE_FOLDER = "webapp";
+  public static final Integer SESSION_TIMEOUT = 60 * 30;
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java
new file mode 100644
index 0000000..22e2263
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerApiDocConfig.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import io.swagger.jaxrs.config.BeanConfig;
+import io.swagger.jaxrs.listing.ApiListingResource;
+import io.swagger.jaxrs.listing.SwaggerSerializers;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class InfraManagerApiDocConfig {
+
+  @Bean
+  public ApiListingResource apiListingResource() {
+    return new ApiListingResource();
+  }
+
+  @Bean
+  public SwaggerSerializers swaggerSerializers() {
+    return new SwaggerSerializers();
+  }
+
+  @Bean
+  public BeanConfig swaggerConfig() {
+    BeanConfig beanConfig = new BeanConfig();
+    beanConfig.setSchemes(new String[]{"http", "https"});
+    beanConfig.setBasePath("/api/v1");
+    beanConfig.setTitle("Infra Manager REST API");
+    beanConfig.setDescription("Manager component for Ambari Infra");
+    beanConfig.setLicense("Apache 2.0");
+    beanConfig.setLicenseUrl("http://www.apache.org/licenses/LICENSE-2.0.html");
+    beanConfig.setScan(true);
+    beanConfig.setVersion("1.0.0");
+    beanConfig.setResourcePackage("org.apache.ambari.infra.rest");
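+    // must match the Jersey provider package registered in InfraManager ("org.apache.ambari.infra.rest")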
+    return beanConfig;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java
new file mode 100644
index 0000000..86059a2
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/InfraManagerConfig.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.PropertySource;
+import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
+
+@Configuration
+@ComponentScan("org.apache.ambari.infra")
+@PropertySource(value = {"classpath:infra-manager.properties"})
+public class InfraManagerConfig {
+
+  @Bean
+  public static PropertySourcesPlaceholderConfigurer propertyConfigurer() {
+    return new PropertySourcesPlaceholderConfigurer();
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
new file mode 100644
index 0000000..c3d8db6
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.conf.batch;
+
+import org.apache.ambari.infra.job.dummy.DummyItemProcessor;
+import org.apache.ambari.infra.job.dummy.DummyItemWriter;
+import org.apache.ambari.infra.job.dummy.DummyObject;
+import org.springframework.batch.admin.service.JdbcSearchableJobExecutionDao;
+import org.springframework.batch.admin.service.JdbcSearchableJobInstanceDao;
+import org.springframework.batch.admin.service.JdbcSearchableStepExecutionDao;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.SearchableJobExecutionDao;
+import org.springframework.batch.admin.service.SearchableJobInstanceDao;
+import org.springframework.batch.admin.service.SearchableStepExecutionDao;
+import org.springframework.batch.admin.service.SimpleJobService;
+import org.springframework.batch.core.Job;
+import org.springframework.batch.core.Step;
+import org.springframework.batch.core.configuration.JobRegistry;
+import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
+import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
+import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
+import org.springframework.batch.core.configuration.support.JobRegistryBeanPostProcessor;
+import org.springframework.batch.core.explore.JobExplorer;
+import org.springframework.batch.core.launch.JobLauncher;
+import org.springframework.batch.core.launch.JobOperator;
+import org.springframework.batch.core.launch.support.SimpleJobLauncher;
+import org.springframework.batch.core.launch.support.SimpleJobOperator;
+import org.springframework.batch.core.repository.JobRepository;
+import org.springframework.batch.core.repository.dao.DefaultExecutionContextSerializer;
+import org.springframework.batch.core.repository.dao.ExecutionContextDao;
+import org.springframework.batch.core.repository.dao.JdbcExecutionContextDao;
+import org.springframework.batch.core.repository.support.JobRepositoryFactoryBean;
+import org.springframework.batch.item.ItemProcessor;
+import org.springframework.batch.item.ItemReader;
+import org.springframework.batch.item.ItemWriter;
+import org.springframework.batch.item.file.FlatFileItemReader;
+import org.springframework.batch.item.file.LineMapper;
+import org.springframework.batch.item.file.mapping.BeanWrapperFieldSetMapper;
+import org.springframework.batch.item.file.mapping.DefaultLineMapper;
+import org.springframework.batch.item.file.mapping.FieldSetMapper;
+import org.springframework.batch.item.file.transform.DelimitedLineTokenizer;
+import org.springframework.batch.item.file.transform.LineTokenizer;
+import org.springframework.batch.support.transaction.ResourcelessTransactionManager;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.core.io.ClassPathResource;
+import org.springframework.core.io.Resource;
+import org.springframework.core.task.SimpleAsyncTaskExecutor;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.datasource.DriverManagerDataSource;
+import org.springframework.jdbc.datasource.init.DataSourceInitializer;
+import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator;
+import org.springframework.scheduling.annotation.EnableScheduling;
+import org.springframework.transaction.PlatformTransactionManager;
+
+import javax.inject.Inject;
+import javax.sql.DataSource;
+import java.net.MalformedURLException;
+
+@Configuration
+@EnableBatchProcessing
+@EnableScheduling
+public class InfraManagerBatchConfig {
+
+  @Value("classpath:org/springframework/batch/core/schema-drop-sqlite.sql")
+  private Resource dropRepositoryTables;
+
+  @Value("classpath:org/springframework/batch/core/schema-sqlite.sql")
+  private Resource dataRepositorySchema;
+
+  @Value("${infra-manager.batch.db.init:false}")
+  private boolean dropDatabaseOnStartup;
+
+  @Value("${infra-manager.batch.db.file:/etc/ambari-inra-manager/conf/repository.db}")
+  private String sqliteDbFileLocation;
+
+  @Value("${infra-manager.batch.db.username}")
+  private String databaseUsername;
+
+  @Value("${infra-manager.batch.db.password}")
+  private String databasePassword;
+
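+  // The infra-manager.batch.db.* values above are resolved from infra-manager.properties
+  // (loaded through @PropertySource in InfraManagerConfig).
+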
+  @Inject
+  private StepBuilderFactory steps;
+
+  @Inject
+  private JobBuilderFactory jobs;
+
+  @Inject
+  private JobRegistry jobRegistry;
+
+  @Inject
+  private JobExplorer jobExplorer;
+
+  @Bean
+  public DataSource dataSource() {
+    DriverManagerDataSource dataSource = new DriverManagerDataSource();
+    dataSource.setDriverClassName("org.sqlite.JDBC");
+    dataSource.setUrl("jdbc:sqlite:" + sqliteDbFileLocation);
+    dataSource.setUsername(databaseUsername);
+    dataSource.setPassword(databasePassword);
+    return dataSource;
+  }
+
+  @Bean
+  public DataSourceInitializer dataSourceInitializer(DataSource dataSource)
+    throws MalformedURLException {
+    ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator();
+    if (dropDatabaseOnStartup) {
+      databasePopulator.addScript(dropRepositoryTables);
+      databasePopulator.setIgnoreFailedDrops(true);
+    }
+    databasePopulator.addScript(dataRepositorySchema);
+    databasePopulator.setContinueOnError(true);
+
+    DataSourceInitializer initializer = new DataSourceInitializer();
+    initializer.setDataSource(dataSource);
+    initializer.setDatabasePopulator(databasePopulator);
+
+    return initializer;
+  }
+
+  @Bean
+  public JobRepository jobRepository() throws Exception {
+    JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
+    factory.setDataSource(dataSource());
+    factory.setTransactionManager(getTransactionManager());
+    factory.afterPropertiesSet();
+    return factory.getObject();
+  }
+
+  @Bean
+  public PlatformTransactionManager getTransactionManager() {
+    return new ResourcelessTransactionManager();
+  }
+
+  @Bean(name = "jobLauncher")
+  public JobLauncher jobLauncher() throws Exception {
+    SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
+    jobLauncher.setJobRepository(jobRepository());
+    jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor());
+    jobLauncher.afterPropertiesSet();
+    return jobLauncher;
+  }
+
+  @Bean
+  public JobOperator jobOperator() throws Exception {
+    SimpleJobOperator jobOperator = new SimpleJobOperator();
+    jobOperator.setJobExplorer(jobExplorer);
+    jobOperator.setJobLauncher(jobLauncher());
+    jobOperator.setJobRegistry(jobRegistry);
+    jobOperator.setJobRepository(jobRepository());
+    return jobOperator;
+  }
+
+  @Bean
+  public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor() {
+    JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor = new JobRegistryBeanPostProcessor();
+    jobRegistryBeanPostProcessor.setJobRegistry(jobRegistry);
+    return jobRegistryBeanPostProcessor;
+  }
+
+  @Bean
+  public JdbcTemplate jdbcTemplate() {
+    return new JdbcTemplate(dataSource());
+  }
+
+  @Bean
+  public SearchableJobInstanceDao searchableJobInstanceDao() {
+    JdbcSearchableJobInstanceDao dao = new JdbcSearchableJobInstanceDao();
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public SearchableJobExecutionDao searchableJobExecutionDao() {
+    JdbcSearchableJobExecutionDao dao = new JdbcSearchableJobExecutionDao();
+    dao.setJdbcTemplate(jdbcTemplate());
+    dao.setDataSource(dataSource());
+    return dao;
+  }
+
+  @Bean
+  public SearchableStepExecutionDao searchableStepExecutionDao() {
+    JdbcSearchableStepExecutionDao dao = new JdbcSearchableStepExecutionDao();
+    dao.setDataSource(dataSource());
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public ExecutionContextDao executionContextDao() {
+    JdbcExecutionContextDao dao = new JdbcExecutionContextDao();
+    dao.setSerializer(new DefaultExecutionContextSerializer());
+    dao.setJdbcTemplate(jdbcTemplate());
+    return dao;
+  }
+
+  @Bean
+  public JobService jobService() throws Exception {
+    return new SimpleJobService(searchableJobInstanceDao(), searchableJobExecutionDao(),
+      searchableStepExecutionDao(), jobRepository(), jobLauncher(), jobRegistry, executionContextDao());
+  }
+
+  @Bean(name = "dummyStep")
+  protected Step dummyStep(ItemReader<DummyObject> reader,
+                           ItemProcessor<DummyObject, String> processor,
+                           ItemWriter<String> writer) {
+    return steps.get("dummyStep").<DummyObject, String> chunk(2)
+      .reader(reader).processor(processor).writer(writer).build();
+  }
+
+  @Bean(name = "dummyJob")
+  public Job job(@Qualifier("dummyStep") Step dummyStep) {
+    return jobs.get("dummyJob").start(dummyStep).build();
+  }
+
+  @Bean
+  public ItemReader<DummyObject> dummyItemReader() {
+    FlatFileItemReader<DummyObject> csvFileReader = new FlatFileItemReader<>();
+    csvFileReader.setResource(new ClassPathResource("dummy/dummy.txt"));
+    csvFileReader.setLinesToSkip(1);
+    LineMapper<DummyObject> lineMapper = dummyLineMapper();
+    csvFileReader.setLineMapper(lineMapper);
+    return csvFileReader;
+  }
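+  // The reader above expects a comma-separated dummy/dummy.txt whose first (header) line is
+  // skipped, e.g. "f1,f2" followed by rows such as "foo,bar" (sample values are illustrative).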
+
+  @Bean
+  public ItemProcessor<DummyObject, String> dummyItemProcessor() {
+    return new DummyItemProcessor();
+  }
+
+  @Bean
+  public ItemWriter<String> dummyItemWriter() {
+    return new DummyItemWriter();
+  }
+
+  private LineMapper<DummyObject> dummyLineMapper() {
+    DefaultLineMapper<DummyObject> lineMapper = new DefaultLineMapper<>();
+
+    LineTokenizer dummyTokenizer = dummyTokenizer();
+    lineMapper.setLineTokenizer(dummyTokenizer);
+
+    FieldSetMapper<DummyObject> dummyFieldSetMapper = dummyFieldSetMapper();
+    lineMapper.setFieldSetMapper(dummyFieldSetMapper);
+
+    return lineMapper;
+  }
+
+  private FieldSetMapper<DummyObject> dummyFieldSetMapper() {
+    BeanWrapperFieldSetMapper<DummyObject> dummyObjectMapper = new BeanWrapperFieldSetMapper<>();
+    dummyObjectMapper.setTargetType(DummyObject.class);
+    return dummyObjectMapper;
+  }
+
+  private LineTokenizer dummyTokenizer() {
+    DelimitedLineTokenizer dummyLineTokenizer = new DelimitedLineTokenizer();
+    dummyLineTokenizer.setDelimiter(",");
+    dummyLineTokenizer.setNames(new String[]{"f1", "f2"});
+    return dummyLineTokenizer;
+  }
+
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java
new file mode 100644
index 0000000..a124e4d
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemProcessor.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.item.ItemProcessor;
+
+public class DummyItemProcessor implements ItemProcessor<DummyObject, String> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DummyItemProcessor.class);
+
+  @Override
+  public String process(DummyObject input) throws Exception {
+    LOG.info("Dummy processing, f1: {}, f2: {}. wait 10 seconds", input.getF1(), input.getF2());
+    Thread.sleep(10000);
+    return String.format("%s, %s", input.getF1(), input.getF2());
+  }
+
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java
new file mode 100644
index 0000000..f495795
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyItemWriter.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.item.ItemWriter;
+
+import java.util.List;
+
+public class DummyItemWriter implements ItemWriter<String> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DummyItemWriter.class);
+
+  @Override
+  public void write(List<? extends String> values) throws Exception {
+    LOG.info("DummyItem writer called (values: {})... wait 1 seconds", values.toString());
+    Thread.sleep(1000);
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java
new file mode 100644
index 0000000..ce087dd
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/dummy/DummyObject.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.job.dummy;
+
+public class DummyObject {
+  private String f1;
+  private String f2;
+
+  public String getF1() {
+    return f1;
+  }
+
+  public void setF1(String f1) {
+    this.f1 = f1;
+  }
+
+  public String getF2() {
+    return f2;
+  }
+
+  public void setF2(String f2) {
+    this.f2 = f2;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
new file mode 100644
index 0000000..fc0a4f7
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/manager/JobManager.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.manager;
+
+import com.google.common.collect.Lists;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobInstanceDetailsResponse;
+import org.apache.ambari.infra.model.JobOperationParams;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.service.JobService;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParametersBuilder;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobOperator;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TimeZone;
+
+@Named
+public class JobManager {
+
+  @Inject
+  private JobService jobService;
+
+  @Inject
+  private JobOperator jobOperator;
+
+  private TimeZone timeZone = TimeZone.getDefault();
+
+  public Set<String> getAllJobNames() {
+    return jobOperator.getJobNames();
+  }
+
+  /**
+   * Launch a new job instance (based on job name) and apply custom parameters to it.
+   * A date parameter is also added to make sure the job instance is unique.
+   */
+  public JobExecutionInfoResponse launchJob(String jobName, String params)
+    throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException,
+    JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException {
+    // TODO: handle params
+    JobParametersBuilder jobParametersBuilder = new JobParametersBuilder();
+    jobParametersBuilder.addDate("date", new Date());
+    return new JobExecutionInfoResponse(jobService.launch(jobName, jobParametersBuilder.toJobParameters()), timeZone);
+  }
+
+  /**
+   * Get the ids of all running executions for a specific job name.
+   */
+  public Set<Long> getExecutionIdsByJobName(String jobName) throws NoSuchJobException {
+    return jobOperator.getRunningExecutions(jobName);
+  }
+
+  /**
+   * Stop all running job executions and return the number of stopped jobs.
+   */
+  public Integer stopAllJobs() {
+    return jobService.stopAll();
+  }
+
+  /**
+   * Gather job execution details by job execution id.
+   */
+  public JobExecutionDetailsResponse getExectionInfo(Long jobExecutionId) throws NoSuchJobExecutionException {
+    JobExecution jobExecution = jobService.getJobExecution(jobExecutionId);
+    List<StepExecutionInfoResponse> stepExecutionInfos = new ArrayList<>();
+    for (StepExecution stepExecution : jobExecution.getStepExecutions()) {
+      stepExecutionInfos.add(new StepExecutionInfoResponse(stepExecution, timeZone));
+    }
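+    // Sort the step executions by id so the response lists them in a stable order.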
+    Collections.sort(stepExecutionInfos, new Comparator<StepExecutionInfoResponse>() {
+      @Override
+      public int compare(StepExecutionInfoResponse o1, StepExecutionInfoResponse o2) {
+        return o1.getId().compareTo(o2.getId());
+      }
+    });
+    return new JobExecutionDetailsResponse(new JobExecutionInfoResponse(jobExecution, timeZone), stepExecutionInfos);
+  }
+
+  /**
+   * Stop or abandon a running job execution by job execution id.
+   */
+  public JobExecutionInfoResponse stopOrAbandonJobByExecutionId(Long jobExecutionId, JobOperationParams.JobStopOrAbandonOperationParam operation)
+    throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+    JobExecution jobExecution;
+    if (JobOperationParams.JobStopOrAbandonOperationParam.STOP.equals(operation)) {
+      jobExecution = jobService.stop(jobExecutionId);
+    } else if (JobOperationParams.JobStopOrAbandonOperationParam.ABANDON.equals(operation)) {
+      jobExecution = jobService.abandon(jobExecutionId);
+    } else {
+      throw new UnsupportedOperationException("Unsupported operation (try: STOP or ABANDON)");
+    }
+    return new JobExecutionInfoResponse(jobExecution, timeZone);
+  }
+
+  /**
+   * Get the execution context for a job execution. (The context can be carried over between
+   * job executions, e.g. on restart.)
+   */
+  public ExecutionContextResponse getExecutionContextByJobExecutionId(Long executionId) throws NoSuchJobExecutionException {
+    JobExecution jobExecution = jobService.getJobExecution(executionId);
+    Map<String, Object> executionMap = new HashMap<>();
+    for (Map.Entry<String, Object> entry : jobExecution.getExecutionContext().entrySet()) {
+      executionMap.put(entry.getKey(), entry.getValue());
+    }
+    return new ExecutionContextResponse(executionId, executionMap);
+  }
+
+  /**
+   * Restart a specific job instance with the same parameters. (Only the RESTART operation is
+   * supported here.)
+   */
+  public JobExecutionInfoResponse restart(Long jobInstanceId, String jobName,
+                                          JobOperationParams.JobRestartOperationParam operation) throws NoSuchJobException, JobParametersInvalidException,
+    JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException, NoSuchJobExecutionException {
+    if (JobOperationParams.JobRestartOperationParam.RESTART.equals(operation)) {
+      Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstanceId);
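+      // Restart the first execution returned for the instance; this is assumed to be the
+      // relevant one, and jobService.restart() fails anyway if the execution is not restartable.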
+      JobExecution jobExecution = jobExecutions.iterator().next();
+      Long jobExecutionId = jobExecution.getId();
+      return new JobExecutionInfoResponse(jobService.restart(jobExecutionId), timeZone);
+    } else {
+      throw new UnsupportedOperationException("Unsupported operation (try: RESTART)");
+    }
+  }
+
+  /**
+   * Get all job details. (paged)
+   */
+  public List<JobInfo> getAllJobs(int start, int pageSize) {
+    List<JobInfo> jobs = new ArrayList<>();
+    Collection<String> names = jobService.listJobs(start, pageSize);
+    for (String name : names) {
+      int count = 0;
+      try {
+        count = jobService.countJobExecutionsForJob(name);
+      }
+      catch (NoSuchJobException e) {
+        // cannot happen: the name was just returned by listJobs()
+      }
+      boolean launchable = jobService.isLaunchable(name);
+      boolean incrementable = jobService.isIncrementable(name);
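+      // The third argument (the job instance id) is left null: this listing spans all instances.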
+      jobs.add(new JobInfo(name, count, null, launchable, incrementable));
+    }
+    return jobs;
+  }
+
+  /**
+   * Get all executions for a single job instance.
+   */
+  public List<JobExecutionInfoResponse> getExecutionsForJobInstance(String jobName, Long jobInstanceId) throws NoSuchJobInstanceException, NoSuchJobException {
+    List<JobExecutionInfoResponse> result = Lists.newArrayList();
+    JobInstance jobInstance = jobService.getJobInstance(jobInstanceId);
+    Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getInstanceId());
+    for (JobExecution jobExecution : jobExecutions) {
+      result.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+    }
+    return result;
+  }
+
+  /**
+   * Get job details for a specific job. (paged)
+   */
+  public JobDetailsResponse getJobDetails(String jobName, int page, int size) throws NoSuchJobException {
+    List<JobInstanceDetailsResponse> jobInstanceResponses = Lists.newArrayList();
+    Collection<JobInstance> jobInstances = jobService.listJobInstances(jobName, page, size);
+
+    int count = jobService.countJobExecutionsForJob(jobName);
+    boolean launchable = jobService.isLaunchable(jobName);
+    boolean isIncrementable = jobService.isIncrementable(jobName);
+
+    for (JobInstance jobInstance: jobInstances) {
+      List<JobExecutionInfoResponse> executionInfos = Lists.newArrayList();
+      Collection<JobExecution> jobExecutions = jobService.getJobExecutionsForJobInstance(jobName, jobInstance.getId());
+      if (jobExecutions != null) {
+        for (JobExecution jobExecution : jobExecutions) {
+          executionInfos.add(new JobExecutionInfoResponse(jobExecution, timeZone));
+        }
+      }
+      jobInstanceResponses.add(new JobInstanceDetailsResponse(jobInstance, executionInfos));
+    }
+    return new JobDetailsResponse(new JobInfo(jobName, count, launchable, isIncrementable), jobInstanceResponses);
+  }
+
+  /**
+   * Get step execution details for a given job execution id and step execution id.
+   */
+  public StepExecutionInfoResponse getStepExecution(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    return new StepExecutionInfoResponse(stepExecution, timeZone);
+  }
+
+  /**
+   * Get step execution context details. (The execution context can be shared between steps.)
+   */
+  public StepExecutionContextResponse getStepExecutionContext(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    Map<String, Object> executionMap = new HashMap<>();
+    for (Map.Entry<String, Object> entry : stepExecution.getExecutionContext().entrySet()) {
+      executionMap.put(entry.getKey(), entry.getValue());
+    }
+    return new StepExecutionContextResponse(executionMap, jobExecutionId, stepExecutionId, stepExecution.getStepName());
+  }
+
+  /**
+   * Get step execution progress status details.
+   */
+  public StepExecutionProgressResponse getStepExecutionProgress(Long jobExecutionId, Long stepExecutionId) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    StepExecution stepExecution = jobService.getStepExecution(jobExecutionId, stepExecutionId);
+    StepExecutionInfoResponse stepExecutionInfoResponse = new StepExecutionInfoResponse(stepExecution, timeZone);
+    String stepName = stepExecution.getStepName();
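+    // Partitioned steps are named "<step>:partition<N>"; collapse them to "<step>:partition*"
+    // so that the history computed below aggregates over all partitions.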
+    if (stepName.contains(":partition")) {
+      stepName = stepName.replaceAll("(:partition).*", "$1*");
+    }
+    String jobName = stepExecution.getJobExecution().getJobInstance().getJobName();
+    StepExecutionHistory stepExecutionHistory = computeHistory(jobName, stepName);
+    StepExecutionProgress stepExecutionProgress = new StepExecutionProgress(stepExecution, stepExecutionHistory);
+
+    return new StepExecutionProgressResponse(stepExecutionProgress, stepExecutionHistory, stepExecutionInfoResponse);
+
+  }
+
+  private StepExecutionHistory computeHistory(String jobName, String stepName) {
+    int total = jobService.countStepExecutionsForStep(jobName, stepName);
+    StepExecutionHistory stepExecutionHistory = new StepExecutionHistory(stepName);
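+    // Page through the step executions in chunks of 1000 and fold each one into the history.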
+    for (int i = 0; i < total; i += 1000) {
+      for (StepExecution stepExecution : jobService.listStepExecutionsForStep(jobName, stepName, i, 1000)) {
+        stepExecutionHistory.append(stepExecution);
+      }
+    }
+    return stepExecutionHistory;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
new file mode 100644
index 0000000..2d46c54
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/ExecutionContextResponse.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
+public class ExecutionContextResponse {
+
+  private final Long jobExecutionId;
+  private final Map<String, Object> executionContextMap;
+
+  public ExecutionContextResponse(Long jobExecutionId, Map<String, Object> executionContextMap) {
+    this.jobExecutionId = jobExecutionId;
+    this.executionContextMap = executionContextMap;
+  }
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public Map<String, Object> getExecutionContextMap() {
+    return executionContextMap;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
new file mode 100644
index 0000000..cd34fef
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobDetailsResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.web.JobInfo;
+
+import java.util.List;
+
+public class JobDetailsResponse {
+
+  private JobInfo jobInfo;
+  private List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList;
+
+  public JobDetailsResponse() {
+  }
+
+  public JobDetailsResponse(JobInfo jobInfo, List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList) {
+    this.jobInfo = jobInfo;
+    this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+  }
+
+  public JobInfo getJobInfo() {
+    return jobInfo;
+  }
+
+  public void setJobInfo(JobInfo jobInfo) {
+    this.jobInfo = jobInfo;
+  }
+
+  public List<JobInstanceDetailsResponse> getJobInstanceDetailsResponseList() {
+    return jobInstanceDetailsResponseList;
+  }
+
+  public void setJobInstanceDetailsResponseList(List<JobInstanceDetailsResponse> jobInstanceDetailsResponseList) {
+    this.jobInstanceDetailsResponseList = jobInstanceDetailsResponseList;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
new file mode 100644
index 0000000..695b57f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionDetailsResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.List;
+
+public class JobExecutionDetailsResponse {
+
+  private JobExecutionInfoResponse jobExecutionInfoResponse;
+
+  private List<StepExecutionInfoResponse> stepExecutionInfoList;
+
+  public JobExecutionDetailsResponse(JobExecutionInfoResponse jobExecutionInfoResponse, List<StepExecutionInfoResponse> stepExecutionInfoList) {
+    this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+    this.stepExecutionInfoList = stepExecutionInfoList;
+  }
+
+  public JobExecutionInfoResponse getJobExecutionInfoResponse() {
+    return jobExecutionInfoResponse;
+  }
+
+  public void setJobExecutionInfoResponse(JobExecutionInfoResponse jobExecutionInfoResponse) {
+    this.jobExecutionInfoResponse = jobExecutionInfoResponse;
+  }
+
+  public List<StepExecutionInfoResponse> getStepExecutionInfoList() {
+    return stepExecutionInfoList;
+  }
+
+  public void setStepExecutionInfoList(List<StepExecutionInfoResponse> stepExecutionInfoList) {
+    this.stepExecutionInfoList = stepExecutionInfoList;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
new file mode 100644
index 0000000..a7e4a4f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionInfoResponse.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.apache.ambari.infra.model.wrapper.JobExecutionData;
+import org.springframework.batch.admin.web.JobParametersExtractor;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.converter.DefaultJobParametersConverter;
+import org.springframework.batch.core.converter.JobParametersConverter;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Properties;
+import java.util.TimeZone;
+
+public class JobExecutionInfoResponse {
+  private Long id;
+  private int stepExecutionCount;
+  private Long jobId;
+  private String jobName;
+  private String startDate = "";
+  private String startTime = "";
+  private String duration = "";
+  private JobExecutionData jobExecutionData;
+  private Properties jobParameters;
+  private String jobParametersString;
+  private boolean restartable = false;
+  private boolean abandonable = false;
+  private boolean stoppable = false;
+  private final TimeZone timeZone;
+
+
+  public JobExecutionInfoResponse(JobExecution jobExecution, TimeZone timeZone) {
+    JobParametersConverter converter = new DefaultJobParametersConverter();
+    this.jobExecutionData = new JobExecutionData(jobExecution);
+    this.timeZone = timeZone;
+    this.id = jobExecutionData.getId();
+    this.jobId = jobExecutionData.getJobId();
+    this.stepExecutionCount = jobExecutionData.getStepExecutions().size();
+    this.jobParameters = converter.getProperties(jobExecutionData.getJobParameters());
+    this.jobParametersString = (new JobParametersExtractor()).fromJobParameters(jobExecutionData.getJobParameters());
+    JobInstance jobInstance = jobExecutionData.getJobInstance();
+    if(jobInstance != null) {
+      this.jobName = jobInstance.getJobName();
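+      // Derive the allowed operations from the current batch status: restartable once past
+      // STOPPING but before ABANDONED, abandonable once past STARTED (unless already abandoned),
+      // stoppable while still before STOPPING.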
+      BatchStatus status = jobExecutionData.getStatus();
+      this.restartable = status.isGreaterThan(BatchStatus.STOPPING) && status.isLessThan(BatchStatus.ABANDONED);
+      this.abandonable = status.isGreaterThan(BatchStatus.STARTED) && status != BatchStatus.ABANDONED;
+      this.stoppable = status.isLessThan(BatchStatus.STOPPING);
+    } else {
+      this.jobName = "?";
+    }
+
+    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+    SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
+    SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
+
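+    // Durations are elapsed times, so they are formatted against GMT to avoid time-zone offsets.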
+    durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+    timeFormat.setTimeZone(timeZone);
+    dateFormat.setTimeZone(timeZone);
+    if(jobExecutionData.getStartTime() != null) {
+      this.startDate = dateFormat.format(jobExecutionData.getStartTime());
+      this.startTime = timeFormat.format(jobExecutionData.getStartTime());
+      Date endTime = jobExecutionData.getEndTime() != null ? jobExecutionData.getEndTime() : new Date();
+      this.duration = durationFormat.format(new Date(endTime.getTime() - jobExecutionData.getStartTime().getTime()));
+    }
+  }
+
+  public Long getId() {
+    return id;
+  }
+
+  public int getStepExecutionCount() {
+    return stepExecutionCount;
+  }
+
+  public Long getJobId() {
+    return jobId;
+  }
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public String getStartDate() {
+    return startDate;
+  }
+
+  public String getStartTime() {
+    return startTime;
+  }
+
+  public String getDuration() {
+    return duration;
+  }
+
+  public JobExecutionData getJobExecutionData() {
+    return jobExecutionData;
+  }
+
+  public Properties getJobParameters() {
+    return jobParameters;
+  }
+
+  public String getJobParametersString() {
+    return jobParametersString;
+  }
+
+  public boolean isRestartable() {
+    return restartable;
+  }
+
+  public boolean isAbandonable() {
+    return abandonable;
+  }
+
+  public boolean isStoppable() {
+    return stoppable;
+  }
+
+  public TimeZone getTimeZone() {
+    return timeZone;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
new file mode 100644
index 0000000..b4c20e9
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRequest.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.PathParam;
+
+public class JobExecutionRequest {
+
+  @PathParam("jobName")
+  private String jobName;
+
+  @PathParam("jobInstanceId")
+  private Long jobInstanceId;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public Long getJobInstanceId() {
+    return jobInstanceId;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public void setJobInstanceId(Long jobInstanceId) {
+    this.jobInstanceId = jobInstanceId;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
new file mode 100644
index 0000000..88687e7
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionRestartRequest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+public class JobExecutionRestartRequest {
+
+  private String jobName;
+
+  private Long jobInstanceId;
+
+  private JobOperationParams.JobRestartOperationParam operation;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public Long getJobInstanceId() {
+    return jobInstanceId;
+  }
+
+  public void setJobInstanceId(Long jobInstanceId) {
+    this.jobInstanceId = jobInstanceId;
+  }
+
+  public JobOperationParams.JobRestartOperationParam getOperation() {
+    return operation;
+  }
+
+  public void setOperation(JobOperationParams.JobRestartOperationParam operation) {
+    this.operation = operation;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
new file mode 100644
index 0000000..b176f12
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobExecutionStopRequest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobExecutionStopRequest {
+
+  @PathParam("jobExecutionId")
+  @NotNull
+  private Long jobExecutionId;
+
+  @QueryParam("operation")
+  @NotNull
+  private JobOperationParams.JobStopOrAbandonOperationParam operation;
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public void setJobExecutionId(Long jobExecutionId) {
+    this.jobExecutionId = jobExecutionId;
+  }
+
+  public JobOperationParams.JobStopOrAbandonOperationParam getOperation() {
+    return operation;
+  }
+
+  public void setOperation(JobOperationParams.JobStopOrAbandonOperationParam operation) {
+    this.operation = operation;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
new file mode 100644
index 0000000..af88654
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceDetailsResponse.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.core.JobInstance;
+
+import java.util.List;
+
+public class JobInstanceDetailsResponse {
+
+  private JobInstance jobInstance;
+
+  private List<JobExecutionInfoResponse> jobExecutionInfoResponseList;
+
+  public JobInstanceDetailsResponse() {
+  }
+
+  public JobInstanceDetailsResponse(JobInstance jobInstance, List<JobExecutionInfoResponse> jobExecutionInfoResponseList) {
+    this.jobInstance = jobInstance;
+    this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+  }
+
+  public JobInstance getJobInstance() {
+    return jobInstance;
+  }
+
+  public void setJobInstance(JobInstance jobInstance) {
+    this.jobInstance = jobInstance;
+  }
+
+  public List<JobExecutionInfoResponse> getJobExecutionInfoResponseList() {
+    return jobExecutionInfoResponseList;
+  }
+
+  public void setJobExecutionInfoResponseList(List<JobExecutionInfoResponse> jobExecutionInfoResponseList) {
+    this.jobExecutionInfoResponseList = jobExecutionInfoResponseList;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
new file mode 100644
index 0000000..905a4fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobInstanceStartRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+
+public class JobInstanceStartRequest {
+
+  @PathParam("jobName")
+  @NotNull
+  private String jobName;
+
+  @QueryParam("params")
+  String params;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+
+  public String getParams() {
+    return params;
+  }
+
+  public void setParams(String params) {
+    this.params = params;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
new file mode 100644
index 0000000..e286deb
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobOperationParams.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+public class JobOperationParams {
+
+  public enum JobStopOrAbandonOperationParam {
+    STOP, ABANDON;
+  }
+
+  public enum JobRestartOperationParam {
+    RESTART;
+  }
+
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
new file mode 100644
index 0000000..b4fd478
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/JobRequest.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class JobRequest extends PageRequest {
+
+  @NotNull
+  @PathParam("jobName")
+  private String jobName;
+
+  public String getJobName() {
+    return jobName;
+  }
+
+  public void setJobName(String jobName) {
+    this.jobName = jobName;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
new file mode 100644
index 0000000..679d4fd
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/PageRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.QueryParam;
+
+public class PageRequest {
+
+  @QueryParam("page")
+  @DefaultValue("0")
+  private int page;
+
+  @QueryParam("size")
+  @DefaultValue("20")
+  private int size;
+
+  public int getPage() {
+    return page;
+  }
+
+  public void setPage(int page) {
+    this.page = page;
+  }
+
+  public int getSize() {
+    return size;
+  }
+
+  public void setSize(int size) {
+    this.size = size;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
new file mode 100644
index 0000000..0e67a87
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionContextResponse.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import java.util.Map;
+
+public class StepExecutionContextResponse {
+
+  private Map<String, Object> executionContextMap;
+
+  private Long jobExecutionId;
+
+  private Long stepExecutionId;
+
+  private String stepName;
+
+  public StepExecutionContextResponse() {
+  }
+
+  public StepExecutionContextResponse(Map<String, Object> executionContextMap, Long jobExecutionId, Long stepExecutionId, String stepName) {
+    this.executionContextMap = executionContextMap;
+    this.jobExecutionId = jobExecutionId;
+    this.stepExecutionId = stepExecutionId;
+    this.stepName = stepName;
+  }
+
+  public Map<String, Object> getExecutionContextMap() {
+    return executionContextMap;
+  }
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public Long getStepExecutionId() {
+    return stepExecutionId;
+  }
+
+  public String getStepName() {
+    return stepName;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
new file mode 100644
index 0000000..ed04767
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionInfoResponse.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.apache.ambari.infra.model.wrapper.StepExecutionData;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.TimeZone;
+
+public class StepExecutionInfoResponse {
+  private Long id;
+  private Long jobExecutionId;
+  private String jobName;
+  private String name;
+  private String startDate = "-";
+  private String startTime = "-";
+  private String duration = "-";
+  private StepExecutionData stepExecutionData;
+  private long durationMillis;
+
+  public StepExecutionInfoResponse(String jobName, Long jobExecutionId, String name, TimeZone timeZone) {
+    this.jobName = jobName;
+    this.jobExecutionId = jobExecutionId;
+    this.name = name;
+    this.stepExecutionData = new StepExecutionData(new StepExecution(name, new JobExecution(jobExecutionId)));
+  }
+
+  public StepExecutionInfoResponse(StepExecution stepExecution, TimeZone timeZone) {
+    this.stepExecutionData = new StepExecutionData(stepExecution);
+    this.id = stepExecutionData.getId();
+    this.name = stepExecutionData.getStepName();
+    this.jobName = stepExecutionData.getJobExecution() != null && stepExecutionData.getJobExecution().getJobInstance() != null
+        ? stepExecutionData.getJobExecution().getJobInstance().getJobName() : "?";
+    this.jobExecutionId = stepExecutionData.getJobExecutionId();
+    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+    SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
+    SimpleDateFormat durationFormat = new SimpleDateFormat("HH:mm:ss");
+
+    durationFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
+    timeFormat.setTimeZone(timeZone);
+    dateFormat.setTimeZone(timeZone);
+    if(stepExecutionData.getStartTime() != null) {
+      this.startDate = dateFormat.format(stepExecutionData.getStartTime());
+      this.startTime = timeFormat.format(stepExecutionData.getStartTime());
+      Date endTime = stepExecutionData.getEndTime() != null ? stepExecutionData.getEndTime() : new Date();
+      this.durationMillis = endTime.getTime() - stepExecutionData.getStartTime().getTime();
+      this.duration = durationFormat.format(new Date(this.durationMillis));
+    }
+
+  }
+
+  public Long getId() {
+    return this.id;
+  }
+
+  public Long getJobExecutionId() {
+    return this.jobExecutionId;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public String getJobName() {
+    return this.jobName;
+  }
+
+  public String getStartDate() {
+    return this.startDate;
+  }
+
+  public String getStartTime() {
+    return this.startTime;
+  }
+
+  public String getDuration() {
+    return this.duration;
+  }
+
+  public long getDurationMillis() {
+    return this.durationMillis;
+  }
+
+  public String getStatus() {
+    return this.id != null ? this.stepExecutionData.getStatus().toString() : "NONE";
+  }
+
+  public String getExitCode() {
+    return this.id != null ? this.stepExecutionData.getExitStatus().getExitCode() : "NONE";
+  }
+
+  @JsonIgnore
+  public StepExecutionData getStepExecution() {
+    return this.stepExecutionData;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
new file mode 100644
index 0000000..26f9ed4
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionProgressResponse.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import org.springframework.batch.admin.history.StepExecutionHistory;
+import org.springframework.batch.admin.web.StepExecutionProgress;
+
+public class StepExecutionProgressResponse {
+
+  private StepExecutionProgress stepExecutionProgress;
+
+  private StepExecutionHistory stepExecutionHistory;
+
+  private StepExecutionInfoResponse stepExecutionInfoResponse;
+
+  public StepExecutionProgressResponse() {
+  }
+
+  public StepExecutionProgressResponse(StepExecutionProgress stepExecutionProgress, StepExecutionHistory stepExecutionHistory,
+                                       StepExecutionInfoResponse stepExecutionInfoResponse) {
+    this.stepExecutionProgress = stepExecutionProgress;
+    this.stepExecutionHistory = stepExecutionHistory;
+    this.stepExecutionInfoResponse = stepExecutionInfoResponse;
+  }
+
+  public StepExecutionProgress getStepExecutionProgress() {
+    return stepExecutionProgress;
+  }
+
+  public StepExecutionHistory getStepExecutionHistory() {
+    return stepExecutionHistory;
+  }
+
+  public StepExecutionInfoResponse getStepExecutionInfoResponse() {
+    return stepExecutionInfoResponse;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
new file mode 100644
index 0000000..2228171
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/StepExecutionRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model;
+
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.PathParam;
+
+public class StepExecutionRequest {
+
+  @PathParam("jobExecutionId")
+  @NotNull
+  private Long jobExecutionId;
+
+  @PathParam("stepExecutionId")
+  @NotNull
+  private Long stepExecutionId;
+
+  public Long getJobExecutionId() {
+    return jobExecutionId;
+  }
+
+  public void setJobExecutionId(Long jobExecutionId) {
+    this.jobExecutionId = jobExecutionId;
+  }
+
+  public Long getStepExecutionId() {
+    return stepExecutionId;
+  }
+
+  public void setStepExecutionId(Long stepExecutionId) {
+    this.stepExecutionId = stepExecutionId;
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
new file mode 100644
index 0000000..28e262a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/JobExecutionData.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.collect.Lists;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.JobInstance;
+import org.springframework.batch.core.JobParameters;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for {@link JobExecution} that exposes its attributes for JSON serialization
+ * while hiding, via {@code @JsonIgnore}, back-references that would cause serialization cycles.
+ */
+public class JobExecutionData {
+
+  private JobExecution jobExecution;
+
+  public JobExecutionData(JobExecution jobExecution) {
+    this.jobExecution = jobExecution;
+  }
+
+  @JsonIgnore
+  public JobExecution getJobExecution() {
+    return jobExecution;
+  }
+
+  @JsonIgnore
+  public Collection<StepExecution> getStepExecutions() {
+    return jobExecution.getStepExecutions();
+  }
+
+  public JobParameters getJobParameters() {
+    return jobExecution.getJobParameters();
+  }
+
+  public JobInstance getJobInstance() {
+    return jobExecution.getJobInstance();
+  }
+
+  public Collection<StepExecutionData> getStepExecutionDataList() {
+    List<StepExecutionData> stepExecutionDataList = Lists.newArrayList();
+    Collection<StepExecution> stepExecutions = getStepExecutions();
+    if (stepExecutions != null) {
+      for (StepExecution stepExecution : stepExecutions) {
+        stepExecutionDataList.add(new StepExecutionData(stepExecution));
+      }
+    }
+    return stepExecutionDataList;
+  }
+
+  public BatchStatus getStatus() {
+    return jobExecution.getStatus();
+  }
+
+  public Date getStartTime() {
+    return jobExecution.getStartTime();
+  }
+
+  public Date getCreateTime() {
+    return jobExecution.getCreateTime();
+  }
+
+  public Date getEndTime() {
+    return jobExecution.getEndTime();
+  }
+
+  public Date getLastUpdated() {
+    return jobExecution.getLastUpdated();
+  }
+
+  public ExitStatus getExitStatus() {
+    return jobExecution.getExitStatus();
+  }
+
+  public ExecutionContext getExecutionContext() {
+    return jobExecution.getExecutionContext();
+  }
+
+  public List<Throwable> getFailureExceptions() {
+    return jobExecution.getFailureExceptions();
+  }
+
+  public String getJobConfigurationName() {
+    return jobExecution.getJobConfigurationName();
+  }
+
+  public Long getId() {
+    return jobExecution.getId();
+  }
+
+  public Long getJobId() {
+    return jobExecution.getJobId();
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
new file mode 100644
index 0000000..26552ae
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/model/wrapper/StepExecutionData.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.model.wrapper;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import org.springframework.batch.core.BatchStatus;
+import org.springframework.batch.core.ExitStatus;
+import org.springframework.batch.core.JobExecution;
+import org.springframework.batch.core.StepExecution;
+import org.springframework.batch.item.ExecutionContext;
+
+import java.util.Date;
+import java.util.List;
+
+/**
+ * Wrapper for {@link StepExecution} that exposes its attributes for JSON serialization
+ * while hiding the underlying batch domain objects via {@code @JsonIgnore}.
+ */
+public class StepExecutionData {
+
+  @JsonIgnore
+  private final JobExecution jobExecution;
+
+  @JsonIgnore
+  private final StepExecution stepExecution;
+
+
+  public StepExecutionData(StepExecution stepExecution) {
+    this.stepExecution = stepExecution;
+    this.jobExecution = stepExecution.getJobExecution();
+  }
+
+  @JsonIgnore
+  public JobExecution getJobExecution() {
+    return jobExecution;
+  }
+
+  @JsonIgnore
+  public StepExecution getStepExecution() {
+    return stepExecution;
+  }
+
+  public String getStepName() {
+    return stepExecution.getStepName();
+  }
+
+  public int getReadCount() {
+    return stepExecution.getReadCount();
+  }
+
+  public BatchStatus getStatus() {
+    return stepExecution.getStatus();
+  }
+
+  public int getWriteCount() {
+    return stepExecution.getWriteCount();
+  }
+
+  public int getCommitCount() {
+    return stepExecution.getCommitCount();
+  }
+
+  public int getRollbackCount() {
+    return stepExecution.getRollbackCount();
+  }
+
+  public int getReadSkipCount() {
+    return stepExecution.getReadSkipCount();
+  }
+
+  public int getProcessSkipCount() {
+    return stepExecution.getProcessSkipCount();
+  }
+
+  public Date getStartTime() {
+    return stepExecution.getStartTime();
+  }
+
+  public int getWriteSkipCount() {
+    return stepExecution.getWriteSkipCount();
+  }
+
+  public Date getEndTime() {
+    return stepExecution.getEndTime();
+  }
+
+  public Date getLastUpdated() {
+    return stepExecution.getLastUpdated();
+  }
+
+  public ExecutionContext getExecutionContext() {
+    return stepExecution.getExecutionContext();
+  }
+
+  public ExitStatus getExitStatus() {
+    return stepExecution.getExitStatus();
+  }
+
+  public boolean isTerminateOnly() {
+    return stepExecution.isTerminateOnly();
+  }
+
+  public int getFilterCount() {
+    return stepExecution.getFilterCount();
+  }
+
+  public List<Throwable> getFailureExceptions() {
+    return stepExecution.getFailureExceptions();
+  }
+
+  public Long getId() {
+    return stepExecution.getId();
+  }
+
+  public Long getJobExecutionId() {
+    return stepExecution.getJobExecutionId();
+  }
+}
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
new file mode 100644
index 0000000..079cce3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobExceptionMapper.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.launch.JobExecutionNotFailedException;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobExecutionNotStoppedException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.JobParametersNotFoundException;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+import org.springframework.batch.core.step.NoSuchStepException;
+import org.springframework.web.bind.MethodArgumentNotValidException;
+
+import javax.batch.operations.JobExecutionAlreadyCompleteException;
+import javax.inject.Named;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+import javax.ws.rs.ext.Provider;
+import java.util.Map;
+
+@Named
+@Provider
+public class JobExceptionMapper implements ExceptionMapper<Throwable> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(JobExceptionMapper.class);
+
+  private static final Map<Class<?>, Response.Status> exceptionStatusCodeMap = Maps.newHashMap();
+
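+  // Map well-known Spring Batch exceptions to HTTP status codes; anything unmapped falls
+  // back to 500 INTERNAL_SERVER_ERROR in toResponse() below.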
+  static {
+    exceptionStatusCodeMap.put(MethodArgumentNotValidException.class, Response.Status.BAD_REQUEST);
+    exceptionStatusCodeMap.put(NoSuchJobException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchStepException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchStepExecutionException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchJobExecutionException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(NoSuchJobInstanceException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(JobExecutionNotRunningException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobExecutionNotStoppedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobInstanceAlreadyExistsException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobInstanceAlreadyCompleteException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobExecutionAlreadyRunningException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobExecutionAlreadyCompleteException.class, Response.Status.ACCEPTED);
+    exceptionStatusCodeMap.put(JobParametersNotFoundException.class, Response.Status.NOT_FOUND);
+    exceptionStatusCodeMap.put(JobExecutionNotFailedException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobRestartException.class, Response.Status.INTERNAL_SERVER_ERROR);
+    exceptionStatusCodeMap.put(JobParametersInvalidException.class, Response.Status.BAD_REQUEST);
+  }
+
+  @Override
+  public Response toResponse(Throwable throwable) {
+    LOG.error("REST Exception occurred:", throwable);
+    Response.Status status = Response.Status.INTERNAL_SERVER_ERROR;
+
+    for (Map.Entry<Class<?>, Response.Status> entry : exceptionStatusCodeMap.entrySet()) {
+      // Match the thrown exception's class against the mapped types (including subclasses).
+      if (entry.getKey().isAssignableFrom(throwable.getClass())) {
+        status = entry.getValue();
+        LOG.info("Exception mapped to: {} with status code: {}", entry.getKey().getCanonicalName(), entry.getValue().getStatusCode());
+        break;
+      }
+    }
+
+    return Response.status(status).entity(new StatusMessage(throwable.getMessage(), status.getStatusCode()))
+      .type(MediaType.APPLICATION_JSON_TYPE).build();
+  }
+
+  private static class StatusMessage {
+    private String message;
+    private int statusCode;
+
+    StatusMessage(String message, int statusCode) {
+      this.message = message;
+      this.statusCode = statusCode;
+    }
+
+    public String getMessage() {
+      return message;
+    }
+
+    public int getStatusCode() {
+      return statusCode;
+    }
+  }
+}
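
A minimal sketch (not part of the patch) of what a client sees when `JobExceptionMapper` handles a mapped exception; it assumes a JAX-RS runtime such as Jersey is on the classpath to back `Response.status(...).build()`:

```java
import javax.ws.rs.core.Response;

import org.apache.ambari.infra.rest.JobExceptionMapper;
import org.springframework.batch.core.launch.NoSuchJobException;

public class JobExceptionMapperSketch {
  public static void main(String[] args) {
    JobExceptionMapper mapper = new JobExceptionMapper();
    // NoSuchJobException is registered with NOT_FOUND in the static map above.
    Response response = mapper.toResponse(new NoSuchJobException("no such job: archive_job"));
    System.out.println(response.getStatus()); // 404
    // The JSON provider serializes the StatusMessage entity roughly as:
    // {"message":"no such job: archive_job","statusCode":404}
    // Unmapped throwables fall back to 500 INTERNAL_SERVER_ERROR.
  }
}
```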
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
new file mode 100644
index 0000000..7023957
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/rest/JobResource.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.infra.rest;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+import org.apache.ambari.infra.manager.JobManager;
+import org.apache.ambari.infra.model.ExecutionContextResponse;
+import org.apache.ambari.infra.model.JobDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionDetailsResponse;
+import org.apache.ambari.infra.model.JobExecutionInfoResponse;
+import org.apache.ambari.infra.model.JobExecutionRequest;
+import org.apache.ambari.infra.model.JobExecutionRestartRequest;
+import org.apache.ambari.infra.model.JobExecutionStopRequest;
+import org.apache.ambari.infra.model.JobInstanceStartRequest;
+import org.apache.ambari.infra.model.JobRequest;
+import org.apache.ambari.infra.model.PageRequest;
+import org.apache.ambari.infra.model.StepExecutionContextResponse;
+import org.apache.ambari.infra.model.StepExecutionInfoResponse;
+import org.apache.ambari.infra.model.StepExecutionProgressResponse;
+import org.apache.ambari.infra.model.StepExecutionRequest;
+import org.springframework.batch.admin.service.NoSuchStepExecutionException;
+import org.springframework.batch.admin.web.JobInfo;
+import org.springframework.batch.core.JobParametersInvalidException;
+import org.springframework.batch.core.launch.JobExecutionNotRunningException;
+import org.springframework.batch.core.launch.JobInstanceAlreadyExistsException;
+import org.springframework.batch.core.launch.NoSuchJobException;
+import org.springframework.batch.core.launch.NoSuchJobExecutionException;
+import org.springframework.batch.core.launch.NoSuchJobInstanceException;
+import org.springframework.batch.core.repository.JobExecutionAlreadyRunningException;
+import org.springframework.batch.core.repository.JobInstanceAlreadyCompleteException;
+import org.springframework.batch.core.repository.JobRestartException;
+import org.springframework.context.annotation.Scope;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import javax.validation.Valid;
+import javax.validation.constraints.NotNull;
+import javax.ws.rs.BeanParam;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import java.util.List;
+import java.util.Set;
+
+@Api(value = "jobs", description = "Job operations")
+@Path("jobs")
+@Named
+@Scope("request")
+public class JobResource {
+
+  @Inject
+  private JobManager jobManager;
+
+  @GET
+  @Produces({"application/json"})
+  @ApiOperation("Get all jobs")
+  public List<JobInfo> getAllJobs(@BeanParam @Valid PageRequest request) {
+    return jobManager.getAllJobs(request.getPage(), request.getSize());
+  }
+
+  @POST
+  @Produces({"application/json"})
+  @Path("{jobName}")
+  @ApiOperation("Start a new job instance by job name.")
+  public JobExecutionInfoResponse startJob(@BeanParam @Valid JobInstanceStartRequest request)
+    throws JobParametersInvalidException, JobInstanceAlreadyExistsException, NoSuchJobException, JobExecutionAlreadyRunningException,
+    JobRestartException, JobInstanceAlreadyCompleteException {
+    return jobManager.launchJob(request.getJobName(), request.getParams());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/info/names")
+  @ApiOperation("Get all job names")
+  public Set<String> getAllJobNames() {
+    return jobManager.getAllJobNames();
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/info/{jobName}")
+  @ApiOperation("Get job details by job name.")
+  public JobDetailsResponse getJobDetails(@BeanParam @Valid JobRequest jobRequest) throws NoSuchJobException {
+    return jobManager.getJobDetails(jobRequest.getJobName(), jobRequest.getPage(), jobRequest.getSize());
+  }
+
+  @GET
+  @Path("{jobName}/executions")
+  @Produces({"application/json"})
+  @ApiOperation("Get the id values of all the running job instances.")
+  public Set<Long> getExecutionIdsByJobName(@PathParam("jobName") @NotNull @Valid String jobName) throws NoSuchJobException {
+    return jobManager.getExecutionIdsByJobName(jobName);
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}")
+  @ApiOperation("Get job and step details for job execution instance.")
+  public JobExecutionDetailsResponse getExectionInfo(@PathParam("jobExecutionId") @Valid Long jobExecutionId) throws NoSuchJobExecutionException {
+    return jobManager.getExectionInfo(jobExecutionId);
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/context")
+  @ApiOperation("Get execution context for specific job.")
+  public ExecutionContextResponse getExecutionContextByJobExecId(@PathParam("jobExecutionId") Long executionId) throws NoSuchJobExecutionException {
+    return jobManager.getExecutionContextByJobExecutionId(executionId);
+  }
+
+
+  @DELETE
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}")
+  @ApiOperation("Stop or abandon a running job execution.")
+  public JobExecutionInfoResponse stopOrAbandonJobExecution(@BeanParam @Valid JobExecutionStopRequest request)
+    throws NoSuchJobExecutionException, JobExecutionNotRunningException, JobExecutionAlreadyRunningException {
+    return jobManager.stopOrAbandonJobByExecutionId(request.getJobExecutionId(), request.getOperation());
+  }
+
+  @DELETE
+  @Produces({"application/json"})
+  @Path("/executions")
+  @ApiOperation("Stop all job executions.")
+  public Integer stopAll() {
+    return jobManager.stopAllJobs();
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/{jobName}/{jobInstanceId}/executions")
+  @ApiOperation("Get execution for job instance.")
+  public List<JobExecutionInfoResponse> getExecutionsForInstance(@BeanParam @Valid JobExecutionRequest request) throws JobInstanceAlreadyCompleteException,
+    NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException, NoSuchJobInstanceException {
+    return jobManager.getExecutionsForJobInstance(request.getJobName(), request.getJobInstanceId());
+  }
+
+  @POST
+  @Produces({"application/json"})
+  @Path("/{jobName}/{jobInstanceId}/executions")
+  @ApiOperation("Restart job instance.")
+  public JobExecutionInfoResponse restartJobInstance(@BeanParam @Valid JobExecutionRestartRequest request) throws JobInstanceAlreadyCompleteException,
+    NoSuchJobExecutionException, JobExecutionAlreadyRunningException, JobParametersInvalidException, JobRestartException, NoSuchJobException {
+    return jobManager.restart(request.getJobInstanceId(), request.getJobName(), request.getOperation());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}")
+  @ApiOperation("Get step execution details.")
+  public StepExecutionInfoResponse getStepExecution(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecution(request.getJobExecutionId(), request.getStepExecutionId());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/execution-context")
+  @ApiOperation("Get the execution context of step execution.")
+  public StepExecutionContextResponse getStepExecutionContext(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecutionContext(request.getJobExecutionId(), request.getStepExecutionId());
+  }
+
+  @GET
+  @Produces({"application/json"})
+  @Path("/executions/{jobExecutionId}/steps/{stepExecutionId}/progress")
+  @ApiOperation("Get progress of step execution.")
+  public StepExecutionProgressResponse getStepExecutionProgress(@BeanParam @Valid StepExecutionRequest request) throws NoSuchStepExecutionException, NoSuchJobExecutionException {
+    return jobManager.getStepExecutionProgress(request.getJobExecutionId(), request.getStepExecutionId());
+  }
+
+}
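
For illustration, a hedged client-side sketch against the endpoints above using the standard JAX-RS 2.0 client API; the host and port are assumptions, and the `api/v1` prefix is taken from the swagger page added later in this patch:

```java
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

public class JobResourceClientSketch {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    try {
      // GET /jobs/info/names -> the job names registered in the JobManager
      String names = client
          .target("http://localhost:61890/api/v1/jobs/info/names") // host/port are assumptions
          .request("application/json")
          .get(String.class);
      System.out.println(names);
    } finally {
      client.close();
    }
  }
}
```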
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/dummy/dummy.txt b/ambari-infra/ambari-infra-manager/src/main/resources/dummy/dummy.txt
new file mode 100644
index 0000000..41da725
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/dummy/dummy.txt
@@ -0,0 +1,3 @@
+f1,f2
+v1,v2
+v3,v4
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
new file mode 100644
index 0000000..c7e11c3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Extend with java options or system properties. e.g.: INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+export INFRA_MANAGER_OPTS=""
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager.properties b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager.properties
new file mode 100644
index 0000000..fbeac78
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+infra-manager.batch.db.file=job-repository.db
+infra-manager.batch.db.init=true
+infra-manager.batch.db.username=admin
+infra-manager.batch.db.password=admin
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
new file mode 100644
index 0000000..65287b2
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+
+PATH=$JAVA_HOME/bin:$PATH nohup $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" $INFRA_MANAGER_OPTS org.apache.ambari.infra.InfraManager ${1+"$@"} &
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/log4j.xml b/ambari-infra/ambari-infra-manager/src/main/resources/log4j.xml
new file mode 100644
index 0000000..0450454
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/log4j.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+  <appender name="console" class="org.apache.log4j.ConsoleAppender">
+    <param name="Target" value="System.out" />
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d [%t] %-5p %C{6} (%F:%L) - %m%n" />
+    </layout>
+  </appender>
+
+  <root>
+    <level value="INFO" />
+    <appender-ref ref="console" />
+  </root>
+</log4j:configuration>
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/swagger/swagger.html b/ambari-infra/ambari-infra-manager/src/main/resources/swagger/swagger.html
new file mode 100644
index 0000000..8580e1a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/swagger/swagger.html
@@ -0,0 +1,115 @@
+<!DOCTYPE html>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+<head>
+    <title>Infra Manager REST API</title>
+    <link rel="icon" type="image/png" href="images/favicon-32x32.png" sizes="32x32" />
+    <link rel="icon" type="image/png" href="images/favicon-16x16.png" sizes="16x16" />
+    <link href='css/typography.css' media='screen' rel='stylesheet' type='text/css'/>
+    <link href='css/reset.css' media='screen' rel='stylesheet' type='text/css'/>
+    <link href='css/screen.css' media='screen' rel='stylesheet' type='text/css'/>
+    <link href='css/reset.css' media='print' rel='stylesheet' type='text/css'/>
+    <link href='css/print.css' media='print' rel='stylesheet' type='text/css'/>
+    <script src='lib/jquery-1.8.0.min.js' type='text/javascript'></script>
+    <script src='lib/jquery.slideto.min.js' type='text/javascript'></script>
+    <script src='lib/jquery.wiggle.min.js' type='text/javascript'></script>
+    <script src='lib/jquery.ba-bbq.min.js' type='text/javascript'></script>
+    <script src='lib/handlebars-2.0.0.js' type='text/javascript'></script>
+    <script src='lib/underscore-min.js' type='text/javascript'></script>
+    <script src='lib/backbone-min.js' type='text/javascript'></script>
+    <script src='swagger-ui.js' type='text/javascript'></script>
+    <script src='lib/highlight.7.3.pack.js' type='text/javascript'></script>
+    <script src='lib/marked.js' type='text/javascript'></script>
+    <script src='lib/swagger-oauth.js' type='text/javascript'></script>
+
+    <script type="text/javascript">
+        $(function () {
+            var url = window.location.search.match(/url=([^&]+)/);
+            if (url && url.length > 1) {
+                url = decodeURIComponent(url[1]);
+            } else {
+                var urlPrefix = location.protocol +'//'+ location.hostname+(location.port ? ':'+location.port: '');
+                url = urlPrefix + "/api/v1/swagger.yaml";
+            }
+            window.swaggerUi = new SwaggerUi({
+                url: url,
+                dom_id: "swagger-ui-container",
+                supportedSubmitMethods: ['get', 'post', 'put', 'delete', 'patch'],
+                onComplete: function(swaggerApi, swaggerUi){
+                    if(typeof initOAuth == "function") {
+                        initOAuth({
+                            clientId: "your-client-id",
+                            realm: "your-realms",
+                            appName: "your-app-name"
+                        });
+                    }
+
+                    $('pre code').each(function(i, e) {
+                        hljs.highlightBlock(e)
+                    });
+
+                    addApiKeyAuthorization();
+                },
+                onFailure: function(data) {
+                    log("Unable to Load SwaggerUI");
+                },
+                docExpansion: "none",
+                apisSorter: "alpha",
+                showRequestHeaders: false
+            });
+
+            function addApiKeyAuthorization(){
+                var username = encodeURIComponent($('#input_username')[0].value);
+                var password = encodeURIComponent($('#input_password')[0].value);
+                if (username && username.trim() != "" && password && password != "") {
+                    var apiKeyAuth = new SwaggerClient.PasswordAuthorization("Authorization", username, password);
+                    window.swaggerUi.api.clientAuthorizations.add("key", apiKeyAuth);
+                    log("added authorization header: " + 'Basic ' + btoa(username + ':' + password));
+                }
+            }
+
+            $('#input_username, #input_password').change(addApiKeyAuthorization);
+
+            window.swaggerUi.load();
+
+            function log() {
+                if ('console' in window) {
+                    console.log.apply(console, arguments);
+                }
+            }
+        });
+    </script>
+</head>
+
+<body class="swagger-section">
+<div id='header'>
+    <div class="swagger-ui-wrap">
+        <a id="logo" href="http://swagger.io">swagger</a>
+        <form id='api_selector'>
+            <div class='input'><input placeholder="http://example.com/api" id="input_baseUrl" name="baseUrl" type="text"/></div>
+            <div class="input"><input placeholder="username" id="input_username" name="username" type="text" size="10"></div>
+            <div class="input"><input placeholder="password" id="input_password" name="password" type="password" size="10"></div>
+            <div class='input'><a id="explore" href="#">Explore</a></div>
+        </form>
+    </div>
+</div>
+
+<div id="message-bar" class="swagger-ui-wrap">&nbsp;</div>
+<div id="swagger-ui-container" class="swagger-ui-wrap"></div>
+</body>
+</html>
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/webapp/index.html b/ambari-infra/ambari-infra-manager/src/main/resources/webapp/index.html
new file mode 100644
index 0000000..3e64867
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/webapp/index.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+  <head>
+  </head>
+  <body>
+    <h1>Welcome!</h1>
+  </body>
+</html>
\ No newline at end of file
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
index d5d971c..9479679 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
+++ b/ambari-infra/ambari-infra-solr-client/src/main/java/org/apache/ambari/infra/solr/AmbariSolrCloudClient.java
@@ -110,7 +110,7 @@
     List<String> collections = listCollections();
     if (!collections.contains(getCollection())) {
       String collection = new CreateCollectionCommand(getRetryTimes(), getInterval()).run(this);
-      LOG.info("Collection '{}' created.", collection);
+      LOG.info("Collection '{}' creation request sent.", collection);
     } else {
       LOG.info("Collection '{}' already exits.", getCollection());
       if (this.isSplitting()) {
@@ -234,7 +234,7 @@
       for (String shardName : shardList) {
         if (!existingShards.contains(shardName)) {
           new CreateShardCommand(shardName, getRetryTimes(), getInterval()).run(this);
-          LOG.info("New shard added to collection '{}': {}", getCollection(), shardName);
+          LOG.info("Adding new shard to collection request sent ('{}': {})", getCollection(), shardName);
           existingShards.add(shardName);
         }
       }
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
index cd47f06..7bfa864 100644
--- a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
+++ b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrCloudCli.sh
@@ -17,4 +17,4 @@
 JVM="java"
 sdir="`dirname \"$0\"`"
 
-PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir:$sdir/libs/*" org.apache.ambari.logsearch.solr.AmbariSolrCloudCLI ${1+"$@"}
\ No newline at end of file
+PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir:$sdir/libs/*" org.apache.ambari.infra.solr.AmbariSolrCloudCLI ${1+"$@"}
\ No newline at end of file
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index a6a6961..a5a5b38 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -43,6 +43,7 @@
     <module>ambari-infra-assembly</module>
     <module>ambari-infra-solr-client</module>
     <module>ambari-infra-solr-plugin</module>
+    <module>ambari-infra-manager</module>
   </modules>
 
   <build>
diff --git a/ambari-logsearch/README.md b/ambari-logsearch/README.md
index 5c41fcd..d05f45a 100644
--- a/ambari-logsearch/README.md
+++ b/ambari-logsearch/README.md
@@ -36,10 +36,15 @@
 
 ## Running Integration Tests
 
-By default integration tests are not a part of the build process, you need to set ${it.skip} variable to true (docker needed here too)
+By default integration tests are not part of the build process; you need to set -Dbackend-tests or -Dselenium-tests (or use -Dall-tests to run both). Running the tests requires Docker as well (right now docker-for-mac and unix are supported by default; for boot2docker you need to pass the -Ddocker.host parameter to the build).
 
 ```bash
 # from ambari-logsearch folder
-mvn clean integration-test -Dit.skip=false
+mvn clean integration-test -Dbackend-tests failsafe:verify
+# or run selenium tests with docker for mac, but before that you need to start XQuartz
+xquartz
+# then in another window you can start the ui tests
+mvn clean integration-test -Dselenium-tests failsafe:verify
+# you can specify the story file folder location with -Dbackend.stories.location and -Dui.stories.location (absolute file path) in the commands
 ```
 You can also run the tests from the IDE, but make sure all of the ambari-logsearch modules are built first.
diff --git a/ambari-logsearch/ambari-logsearch-config-api/.gitignore b/ambari-logsearch/ambari-logsearch-config-api/.gitignore
new file mode 100644
index 0000000..ae3c172
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/.gitignore
@@ -0,0 +1 @@
+/bin/
diff --git a/ambari-logsearch/ambari-logsearch-config-api/pom.xml b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
new file mode 100644
index 0000000..5355906
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <artifactId>ambari-logsearch</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>ambari-logsearch-config-api</artifactId>
+  <packaging>jar</packaging>
+  <name>Ambari Logsearch Config Api</name>
+  <url>http://maven.apache.org</url>
+
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.7</version>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
new file mode 100644
index 0000000..746c14c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+
+/**
+ * Monitors input configuration changes.
+ */
+public interface InputConfigMonitor {
+  /**
+   * @return A list of JSON strings for all the global config JSONs.
+   */
+  List<String> getGlobalConfigJsons();
+  
+  /**
+   * Notification of a new input configuration.
+   * 
+   * @param serviceName The name of the service for which the input configuration was created.
+   * @param inputConfig The input configuration.
+   * @throws Exception
+   */
+  void loadInputConfigs(String serviceName, InputConfig inputConfig) throws Exception;
+  
+  /**
+   * Notification of the removal of an input configuration.
+   * 
+   * @param serviceName The name of the service whose input configuration was removed.
+   */
+  void removeInputs(String serviceName);
+}
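
A hedged sketch of an InputConfigMonitor implementation that only logs the notifications it receives, to illustrate the contract; it is not part of the patch:

```java
import java.util.Collections;
import java.util.List;

import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingInputConfigMonitor implements InputConfigMonitor {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingInputConfigMonitor.class);

  @Override
  public List<String> getGlobalConfigJsons() {
    return Collections.emptyList(); // no global configs in this sketch
  }

  @Override
  public void loadInputConfigs(String serviceName, InputConfig inputConfig) {
    LOG.info("Input config created for service {}: {} input(s)", serviceName, inputConfig.getInput().size());
  }

  @Override
  public void removeInputs(String serviceName) {
    LOG.info("Input config removed for service {}", serviceName);
  }
}
```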
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java
new file mode 100644
index 0000000..766f751
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+
+/**
+ * Monitors log level filter changes.
+ */
+public interface LogLevelFilterMonitor {
+
+  /**
+   * Notification of a new or updated log level filter.
+   * 
+   * @param logId The log for which the log level filter was created/updated.
+   * @param logLevelFilter The log level filter to apply from now on to the log.
+   */
+  void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter);
+
+  /**
+   * Notification of the removal of a log level filter.
+   * 
+   * @param logId The log whose log level filter was removed.
+   */
+  void removeLogLevelFilter(String logId);
+
+}
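
Similarly, a hedged sketch of a LogLevelFilterMonitor that just records the filters it is notified about; for illustration only:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;

public class InMemoryLogLevelFilterMonitor implements LogLevelFilterMonitor {
  private final Map<String, LogLevelFilter> filters = new ConcurrentHashMap<>();

  @Override
  public void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter) {
    filters.put(logId, logLevelFilter); // a new or updated filter replaces the old one
  }

  @Override
  public void removeLogLevelFilter(String logId) {
    filters.remove(logId);
  }
}
```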
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
new file mode 100644
index 0000000..4cbf21f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.io.Closeable;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+
+/**
+ * Log Search Configuration, which uploads and retrieves configurations, and monitors their changes.
+ */
+public interface LogSearchConfig extends Closeable {
+  /**
+   * Enumeration of the components of the Log Search service.
+   */
+  public enum Component {
+    SERVER, LOGFEEDER;
+  }
+
+  /**
+   * Initialization of the configuration.
+   * 
+   * @param component The component which will use the configuration.
+   * @param properties The properties of that component.
+   * @throws Exception
+   */
+  void init(Component component, Map<String, String> properties) throws Exception;
+
+  /**
+   * Returns all the service names with input configurations of a cluster. Will be used only in SERVER mode.
+   * 
+   * @param clusterName The name of the cluster whose services are required.
+   * @return List of the service names.
+   */
+  List<String> getServices(String clusterName);
+
+  /**
+   * Checks if input configuration exists.
+   * 
+   * @param clusterName The name of the cluster where the service is looked for.
+   * @param serviceName The name of the service looked for.
+   * @return If input configuration exists for the service.
+   * @throws Exception
+   */
+  boolean inputConfigExists(String clusterName, String serviceName) throws Exception;
+
+  /**
+   * Returns the input configuration of a service in a cluster. Will be used only in SERVER mode.
+   * 
+   * @param clusterName The name of the cluster where the service is looked for.
+   * @param serviceName The name of the service looked for.
+   * @return The input configuration for the service if it exists, null otherwise.
+   */
+  InputConfig getInputConfig(String clusterName, String serviceName);
+
+  /**
+   * Uploads the input configuration for a service in a cluster.
+   * 
+   * @param clusterName The name of the cluster where the service is.
+   * @param serviceName The name of the service whose input configuration is uploaded.
+   * @param inputConfig The input configuration of the service.
+   * @throws Exception
+   */
+  void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception;
+
+  /**
+   * Modifies the input configuration for a service in a cluster.
+   * 
+   * @param clusterName The name of the cluster where the service is.
+   * @param serviceName The name of the service whose input configuration is modified.
+   * @param inputConfig The input configuration of the service.
+   * @throws Exception
+   */
+  void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception;
+
+  /**
+   * Uploads the log level filter of a log.
+   * 
+   * @param clusterName The name of the cluster where the log is.
+   * @param logId The id of the log.
+   * @param filter The log level filter for the log.
+   * @throws Exception 
+   */
+  void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) throws Exception;
+
+  /**
+   * Modifies the log level filters for all the logs.
+   * 
+   * @param clusterName The name of the cluster where the logs are.
+   * @param filters The log level filters to set.
+   * @throws Exception
+   */
+  void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception;
+
+  /**
+   * Returns the Log Level Filters of a cluster.
+   * 
+   * @param clusterName The name of the cluster whose log level filters are required.
+   * @return All the log level filters of the cluster.
+   */
+  LogLevelFilterMap getLogLevelFilters(String clusterName);
+
+  /**
+   * Starts the monitoring of the input configurations, asynchronously. Will be used only in LOGFEEDER mode.
+   * 
+   * @param inputConfigMonitor The input config monitor to call in case of an input config change.
+   * @param logLevelFilterMonitor The log level filter monitor to call in case of a log level filter change.
+   * @throws Exception
+   */
+  void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor) throws Exception;
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
new file mode 100644
index 0000000..947e7e7
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactory.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Factory class for LogSearchConfig.
+ */
+public class LogSearchConfigFactory {
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigFactory.class);
+
+  /**
+   * Creates a Log Search Configuration instance that implements {@link org.apache.ambari.logsearch.config.api.LogSearchConfig}.
+   * 
+   * @param component The component of the Log Search Service to create the configuration for (SERVER/LOGFEEDER).
+   * @param properties The properties of the component for which the configuration is created. If the properties contain the
+   *                  "logsearch.config.class" entry than the class defined there would be used instead of the default class.
+   * @param defaultClass The default configuration class to use if not specified otherwise.
+   * @return The Log Search Configuration instance.
+   * @throws Exception Thrown if the defined class does not implement LogSearchConfig, doesn't have a no-argument
+   *                   constructor, or throws an exception in its init method.
+   */
+  public static LogSearchConfig createLogSearchConfig(Component component, Map<String, String> properties,
+      Class<? extends LogSearchConfig> defaultClass) throws Exception {
+    try {
+      LogSearchConfig logSearchConfig = null;
+      String configClassName = properties.get("logsearch.config.class");
+      if (configClassName != null && !"".equals(configClassName.trim())) {
+        Class<?> clazz = Class.forName(configClassName);
+        if (LogSearchConfig.class.isAssignableFrom(clazz)) {
+          logSearchConfig = (LogSearchConfig) clazz.newInstance();
+        } else {
+          throw new IllegalArgumentException("Class " + configClassName + " does not implement the interface " + LogSearchConfig.class.getName());
+        }
+      } else {
+        logSearchConfig = defaultClass.newInstance();
+      }
+      
+      logSearchConfig.init(component, properties);
+      return logSearchConfig;
+    } catch (Exception e) {
+      LOG.error("Could not initialize logsearch config.", e);
+      throw e;
+    }
+  }
+}
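
A usage sketch for the factory under stated assumptions: `ZkLogSearchConfig` is a hypothetical default implementation, and the property contents are placeholders:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.logsearch.config.api.LogSearchConfig;
import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
import org.apache.ambari.logsearch.config.api.LogSearchConfigFactory;

public class LogSearchConfigFactorySketch {
  public static void main(String[] args) throws Exception {
    Map<String, String> properties = new HashMap<>();
    // With no "logsearch.config.class" entry the default class is instantiated;
    // setting that property would override the default via reflection.
    LogSearchConfig config = LogSearchConfigFactory.createLogSearchConfig(
        Component.LOGFEEDER, properties, ZkLogSearchConfig.class); // hypothetical default
    // At this point the factory has already called config.init(component, properties).
  }
}
```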
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java
new file mode 100644
index 0000000..4da400a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Conditions.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface Conditions {
+  Fields getFields();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java
new file mode 100644
index 0000000..5d34b1e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/Fields.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.Set;
+
+public interface Fields {
+  Set<String> getType();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java
new file mode 100644
index 0000000..632c6cb
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterDescriptor.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+import java.util.Map;
+
+public interface FilterDescriptor {
+  String getFilter();
+
+  Conditions getConditions();
+
+  Integer getSortOrder();
+
+  String getSourceField();
+
+  Boolean isRemoveSourceField();
+
+  Map<String, ? extends List<? extends PostMapValues>> getPostMapValues();
+
+  Boolean isEnabled();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java
new file mode 100644
index 0000000..e85ce97
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterGrokDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterGrokDescriptor extends FilterDescriptor {
+  String getLog4jFormat();
+
+  String getMultilinePattern();
+
+  String getMessagePattern();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java
new file mode 100644
index 0000000..08f1893
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterJsonDescriptor.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterJsonDescriptor extends FilterDescriptor {
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java
new file mode 100644
index 0000000..6edd140
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/FilterKeyValueDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface FilterKeyValueDescriptor extends FilterDescriptor {
+  String getFieldSplit();
+
+  String getValueSplit();
+
+  String getValueBorders();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java
new file mode 100644
index 0000000..8126ac9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputConfig.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+
+public interface InputConfig {
+  List<? extends InputDescriptor> getInput();
+
+  List<? extends FilterDescriptor> getFilter();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java
new file mode 100644
index 0000000..c41da93
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputDescriptor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.Map;
+
+public interface InputDescriptor {
+  String getType();
+
+  String getRowtype();
+
+  String getPath();
+
+  Map<String, String> getAddFields();
+
+  String getSource();
+
+  Boolean isTail();
+
+  Boolean isGenEventMd5();
+
+  Boolean isUseEventMd5AsId();
+
+  String getStartPosition();
+
+  Boolean isCacheEnabled();
+
+  String getCacheKeyField();
+
+  Boolean getCacheLastDedupEnabled();
+
+  Integer getCacheSize();
+
+  Long getCacheDedupInterval();
+
+  Boolean isEnabled();
+}
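
Note that the getters use boxed types (Boolean, Integer, Long), so a key absent from the underlying JSON surfaces as null rather than a default. A defensive consumer sketch (the true-by-default choices below are assumptions for illustration, not semantics defined by this patch):

static boolean shouldProcess(InputDescriptor d) {
  // Boxed getters return null for keys absent from the JSON; default them explicitly.
  boolean enabled = d.isEnabled() == null || d.isEnabled();
  boolean tail = d.isTail() == null || d.isTail();
  return enabled && tail;
}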
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java
new file mode 100644
index 0000000..a393dc7
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileBaseDescriptor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputFileBaseDescriptor extends InputDescriptor {
+  Boolean getProcessFile();
+
+  Boolean getCopyFile();
+
+  Integer getCheckpointIntervalMs();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java
new file mode 100644
index 0000000..0070ad9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputFileDescriptor.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputFileDescriptor extends InputFileBaseDescriptor {
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java
new file mode 100644
index 0000000..b075629
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/InputS3FileDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface InputS3FileDescriptor extends InputFileBaseDescriptor {
+  String getS3AccessKey();
+
+  String getS3SecretKey();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java
new file mode 100644
index 0000000..f88435f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapDateDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapDateDescriptor extends MapFieldDescriptor {
+  String getSourceDatePattern();
+
+  String getTargetDatePattern();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java
new file mode 100644
index 0000000..596c173
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldCopyDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldCopyDescriptor extends MapFieldDescriptor {
+  String getCopyName();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java
new file mode 100644
index 0000000..db086c5
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldDescriptor {
+  String getJsonName();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java
new file mode 100644
index 0000000..da8cd0d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldNameDescriptor.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldNameDescriptor extends MapFieldDescriptor {
+  String getNewFieldName();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java
new file mode 100644
index 0000000..cf37e62
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/MapFieldValueDescriptor.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+public interface MapFieldValueDescriptor extends MapFieldDescriptor {
+  String getPreValue();
+
+  String getPostValue();
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java
new file mode 100644
index 0000000..5be7287
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/inputconfig/PostMapValues.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api.model.inputconfig;
+
+import java.util.List;
+
+public interface PostMapValues {
+  List<MapFieldDescriptor> getMappers();
+}
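
PostMapValues groups the per-field mappers; concrete behavior comes from the MapFieldDescriptor subtypes above. A dispatch sketch over a mutable document (the Map<String, Object> document shape is an assumption, and mapper-ordering subtleties such as a rename preceding a copy are ignored):

import java.util.Map;

class PostMapSketch {
  static void apply(Map<String, Object> doc, String field, PostMapValues pmv) {
    for (MapFieldDescriptor mapper : pmv.getMappers()) {
      if (mapper instanceof MapFieldNameDescriptor) {
        // rename the field
        doc.put(((MapFieldNameDescriptor) mapper).getNewFieldName(), doc.remove(field));
      } else if (mapper instanceof MapFieldCopyDescriptor) {
        // copy the value under an additional name
        doc.put(((MapFieldCopyDescriptor) mapper).getCopyName(), doc.get(field));
      } else if (mapper instanceof MapFieldValueDescriptor) {
        MapFieldValueDescriptor v = (MapFieldValueDescriptor) mapper;
        // replace a matching pre-value with the post-value
        if (v.getPreValue().equals(doc.get(field))) {
          doc.put(field, v.getPostValue());
        }
      }
      // MapDateDescriptor (source -> target date pattern conversion) omitted
    }
  }
}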
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java
new file mode 100644
index 0000000..06cf589
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api.model.loglevelfilter;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+public class LogLevelFilter {
+
+  private String label;
+  private List<String> hosts;
+  private List<String> defaultLevels;
+  private List<String> overrideLevels;
+  private Date expiryTime;
+
+  public LogLevelFilter() {
+    hosts = new ArrayList<String>();
+    defaultLevels = new ArrayList<String>();
+    overrideLevels = new ArrayList<String>();
+  }
+
+  public String getLabel() {
+    return label;
+  }
+
+  public void setLabel(String label) {
+    this.label = label;
+  }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  public void setHosts(List<String> hosts) {
+    this.hosts = hosts;
+  }
+
+  public List<String> getDefaultLevels() {
+    return defaultLevels;
+  }
+
+  public void setDefaultLevels(List<String> defaultLevels) {
+    this.defaultLevels = defaultLevels;
+  }
+
+  public List<String> getOverrideLevels() {
+    return overrideLevels;
+  }
+
+  public void setOverrideLevels(List<String> overrideLevels) {
+    this.overrideLevels = overrideLevels;
+  }
+
+  public Date getExpiryTime() {
+    return expiryTime;
+  }
+
+  public void setExpiryTime(Date expiryTime) {
+    this.expiryTime = expiryTime;
+  }
+
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java
new file mode 100644
index 0000000..37fdb9f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api.model.loglevelfilter;
+
+import java.util.TreeMap;
+
+public class LogLevelFilterMap {
+  private TreeMap<String, LogLevelFilter> filter;
+
+  public TreeMap<String, LogLevelFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(TreeMap<String, LogLevelFilter> filter) {
+    this.filter = filter;
+  }
+}
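
Together these two classes model the filter payload: LogLevelFilterMap carries a TreeMap of filters keyed by log id. A construction sketch (the log id, level names, and expiry are illustrative values):

import java.util.Arrays;
import java.util.Date;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;

class LogLevelFilterSketch {
  static LogLevelFilterMap sampleFilters() {
    LogLevelFilter filter = new LogLevelFilter();
    filter.setLabel("hdfs_namenode");  // illustrative log id
    filter.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN"));
    filter.setOverrideLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
    filter.setExpiryTime(new Date(System.currentTimeMillis() + TimeUnit.HOURS.toMillis(1)));

    TreeMap<String, LogLevelFilter> byLogId = new TreeMap<>();
    byLogId.put("hdfs_namenode", filter);

    LogLevelFilterMap filterMap = new LogLevelFilterMap();
    filterMap.setFilter(byLogId);
    return filterMap;
  }
}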
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
new file mode 100644
index 0000000..d7e3c0a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+
+public class LogSearchConfigClass1 implements LogSearchConfig {
+  @Override
+  public void init(Component component, Map<String, String> properties) {}
+
+  @Override
+  public boolean inputConfigExists(String clusterName, String serviceName) throws Exception {
+    return false;
+  }
+
+  @Override
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
+  public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
+  public void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor)
+      throws Exception {}
+
+  @Override
+  public List<String> getServices(String clusterName) {
+    return null;
+  }
+
+  @Override
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
+    return null;
+  }
+
+  @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) {}
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {}
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return null;
+  }
+
+  @Override
+  public void close() {}
+}
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
new file mode 100644
index 0000000..198c133
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+
+public class LogSearchConfigClass2 implements LogSearchConfig {
+  @Override
+  public void init(Component component, Map<String, String> properties) {}
+
+  @Override
+  public boolean inputConfigExists(String clusterName, String serviceName) throws Exception {
+    return false;
+  }
+
+  @Override
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
+  public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
+  public void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor)
+      throws Exception {}
+
+  @Override
+  public List<String> getServices(String clusterName) {
+    return null;
+  }
+
+  @Override
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
+    return null;
+  }
+
+  @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) {}
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {}
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return null;
+  }
+
+  @Override
+  public void close() {}
+}
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
new file mode 100644
index 0000000..425694f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigFactoryTest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
+import org.junit.Test;
+
+import org.junit.Assert;
+
+public class LogSearchConfigFactoryTest {
+
+  @Test
+  public void testDefaultConfig() throws Exception {
+    LogSearchConfig config = LogSearchConfigFactory.createLogSearchConfig(Component.SERVER,
+        Collections.<String, String> emptyMap(), LogSearchConfigClass1.class);
+    
+    Assert.assertSame(config.getClass(), LogSearchConfigClass1.class);
+  }
+
+  @Test
+  public void testCustomConfig() throws Exception {
+    Map<String, String> logsearchConfClassMap = new HashMap<>();
+    logsearchConfClassMap.put("logsearch.config.class", "org.apache.ambari.logsearch.config.api.LogSearchConfigClass2");
+    LogSearchConfig config = LogSearchConfigFactory.createLogSearchConfig(Component.SERVER,
+      logsearchConfClassMap, LogSearchConfigClass1.class);
+    
+    Assert.assertSame(config.getClass(), LogSearchConfigClass2.class);
+  }
+  
+  @Test(expected = IllegalArgumentException.class)
+  public void testNonConfigClass() throws Exception {
+    Map<String, String> logsearchConfClassMap = new HashMap<>();
+    logsearchConfClassMap.put("logsearch.config.class", "org.apache.ambari.logsearch.config.api.NonLogSearchConfigClass");
+    LogSearchConfigFactory.createLogSearchConfig(Component.SERVER,
+      logsearchConfClassMap, LogSearchConfigClass1.class);
+  }
+}
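
These tests pin down the factory contract: the logsearch.config.class property, when present, overrides the supplied default implementation, and a class that does not implement LogSearchConfig is rejected with IllegalArgumentException. Production wiring would look roughly like the fragment below; the property value points at the ZooKeeper implementation added later in this patch, and using it as the default as well is an assumption:

Map<String, String> props = new HashMap<>();
props.put("logsearch.config.class",
    "org.apache.ambari.logsearch.config.zookeeper.LogSearchConfigZK");
LogSearchConfig config =
    LogSearchConfigFactory.createLogSearchConfig(Component.SERVER, props, LogSearchConfigZK.class);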
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/NonLogSearchConfigClass.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/NonLogSearchConfigClass.java
new file mode 100644
index 0000000..9564f33
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/NonLogSearchConfigClass.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+public class NonLogSearchConfigClass {
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/resources/log4j.xml b/ambari-logsearch/ambari-logsearch-config-api/src/test/resources/log4j.xml
new file mode 100644
index 0000000..6d968f9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/resources/log4j.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+  <appender name="console" class="org.apache.log4j.ConsoleAppender">
+    <param name="Target" value="System.out" />
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d [%t] %-5p %C{6} (%F:%L) - %m%n" />
+      <!-- <param name="ConversionPattern" value="%d [%t] %-5p %c %x - %m%n"/> -->
+    </layout>
+  </appender>
+
+  <root>
+    <priority value="warn" />
+    <appender-ref ref="console" />
+  </root>
+
+</log4j:configuration>
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/.gitignore b/ambari-logsearch/ambari-logsearch-config-zookeeper/.gitignore
new file mode 100644
index 0000000..ae3c172
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/.gitignore
@@ -0,0 +1 @@
+/bin/
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
new file mode 100644
index 0000000..7ecda60
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <artifactId>ambari-logsearch</artifactId>
+    <groupId>org.apache.ambari</groupId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>ambari-logsearch-config-zookeeper</artifactId>
+  <packaging>jar</packaging>
+  <name>Ambari Logsearch Config Zookeeper</name>
+  <url>http://maven.apache.org</url>
+
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-config-api</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <version>3.4</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.9</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+      <version>2.12.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-client</artifactId>
+      <version>2.12.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-recipes</artifactId>
+      <version>2.12.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.7</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>2.6.2</version>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
new file mode 100644
index 0000000..4d10a5b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputAdapter;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigGson;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
+import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.recipes.cache.ChildData;
+import org.apache.curator.framework.recipes.cache.TreeCache;
+import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
+import org.apache.curator.framework.recipes.cache.TreeCacheEvent.Type;
+import org.apache.curator.framework.recipes.cache.TreeCacheListener;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.utils.ZKPaths;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Splitter;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+
+public class LogSearchConfigZK implements LogSearchConfig {
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigZK.class);
+
+  private static final int SESSION_TIMEOUT = 15000;
+  private static final int CONNECTION_TIMEOUT = 30000;
+  private static final String DEFAULT_ZK_ROOT = "/logsearch";
+  private static final long WAIT_FOR_ROOT_SLEEP_SECONDS = 10;
+  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
+
+  private static final String CLUSTER_NAME_PROPERTY = "cluster.name";
+  private static final String ZK_CONNECT_STRING_PROPERTY = "logsearch.config.zk_connect_string";
+  private static final String ZK_ACLS_PROPERTY = "logsearch.config.zk_acls";
+  private static final String ZK_ROOT_NODE_PROPERTY = "logsearch.config.zk_root";
+
+  private Map<String, String> properties;
+  private String root;
+  private CuratorFramework client;
+  private TreeCache cache;
+  private Gson gson;
+
+  @Override
+  public void init(Component component, Map<String, String> properties) throws Exception {
+    this.properties = properties;
+    
+    LOG.info("Connecting to ZooKeeper at " + properties.get(ZK_CONNECT_STRING_PROPERTY));
+    client = CuratorFrameworkFactory.builder()
+        .connectString(properties.get(ZK_CONNECT_STRING_PROPERTY))
+        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
+        .connectionTimeoutMs(CONNECTION_TIMEOUT)
+        .sessionTimeoutMs(SESSION_TIMEOUT)
+        .build();
+    client.start();
+
+    root = MapUtils.getString(properties, ZK_ROOT_NODE_PROPERTY, DEFAULT_ZK_ROOT);
+
+    if (component == Component.SERVER) {
+      if (client.checkExists().forPath(root) == null) {
+        client.create().creatingParentContainersIfNeeded().forPath(root);
+      }
+      cache = new TreeCache(client, root);
+      cache.start();
+    } else {
+      while (client.checkExists().forPath(root) == null) {
+        LOG.info("Root node is not present yet, going to sleep for " + WAIT_FOR_ROOT_SLEEP_SECONDS + " seconds");
+        Thread.sleep(WAIT_FOR_ROOT_SLEEP_SECONDS * 1000);
+      }
+
+      cache = new TreeCache(client, String.format("%s/%s", root, properties.get(CLUSTER_NAME_PROPERTY)));
+    }
+    
+    gson = new GsonBuilder().setDateFormat(DATE_FORMAT).create();
+  }
+
+  @Override
+  public boolean inputConfigExists(String clusterName, String serviceName) throws Exception {
+    String nodePath = root + "/" + clusterName + "/input/" + serviceName;
+    return cache.getCurrentData(nodePath) != null;
+  }
+
+  @Override
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
+    String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
+    try {
+      client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, inputConfig.getBytes());
+      LOG.info("Uploaded input config for the service " + serviceName + " for cluster " + clusterName);
+    } catch (NodeExistsException e) {
+      LOG.debug("Did not upload input config for service " + serviceName + " as it was already uploaded by another Log Feeder");
+    }
+  }
+
+  @Override
+  public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
+    String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
+    client.setData().forPath(nodePath, inputConfig.getBytes());
+    LOG.info("Set input config for the service " + serviceName + " for cluster " + clusterName);
+  }
+
+  @Override
+  public void monitorInputConfigChanges(final InputConfigMonitor inputConfigMonitor,
+      final LogLevelFilterMonitor logLevelFilterMonitor) throws Exception {
+    final JsonParser parser = new JsonParser();
+    final JsonArray globalConfigNode = new JsonArray();
+    for (String globalConfigJsonString : inputConfigMonitor.getGlobalConfigJsons()) {
+      JsonElement globalConfigJson = parser.parse(globalConfigJsonString);
+      globalConfigNode.add(globalConfigJson.getAsJsonObject().get("global"));
+    }
+    
+    createGlobalConfigNode(globalConfigNode);
+    
+    TreeCacheListener listener = new TreeCacheListener() {
+      public void childEvent(CuratorFramework client, TreeCacheEvent event) throws Exception {
+        String nodeName = ZKPaths.getNodeFromPath(event.getData().getPath());
+        String nodeData = new String(event.getData().getData());
+        Type eventType = event.getType();
+        
+        String configPathStub = String.format("%s/%s/", root, properties.get(CLUSTER_NAME_PROPERTY));
+        
+        if (event.getData().getPath().startsWith(configPathStub + "input/")) {
+          handleInputConfigChange(eventType, nodeName, nodeData);
+        } else if (event.getData().getPath().startsWith(configPathStub + "loglevelfilter/")) {
+          handleLogLevelFilterChange(eventType, nodeName, nodeData);
+        }
+      }
+
+      private void handleInputConfigChange(Type eventType, String nodeName, String nodeData) {
+        switch (eventType) {
+          case NODE_ADDED:
+            LOG.info("Node added under input ZK node: " + nodeName);
+            addInputs(nodeName, nodeData);
+            break;
+          case NODE_UPDATED:
+            LOG.info("Node updated under input ZK node: " + nodeName);
+            removeInputs(nodeName);
+            addInputs(nodeName, nodeData);
+            break;
+          case NODE_REMOVED:
+            LOG.info("Node removed from input ZK node: " + nodeName);
+            removeInputs(nodeName);
+            break;
+          default:
+            break;
+        }
+      }
+
+      private void removeInputs(String serviceName) {
+        inputConfigMonitor.removeInputs(serviceName);
+      }
+
+      private void addInputs(String serviceName, String inputConfig) {
+        try {
+          JsonElement inputConfigJson = parser.parse(inputConfig);
+          for (Map.Entry<String, JsonElement> typeEntry : inputConfigJson.getAsJsonObject().entrySet()) {
+            for (JsonElement e : typeEntry.getValue().getAsJsonArray()) {
+              for (JsonElement globalConfig : globalConfigNode) {
+                merge(globalConfig.getAsJsonObject(), e.getAsJsonObject());
+              }
+            }
+          }
+          
+          inputConfigMonitor.loadInputConfigs(serviceName, InputConfigGson.gson.fromJson(inputConfigJson, InputConfigImpl.class));
+        } catch (Exception e) {
+          LOG.error("Could not load input configuration for service " + serviceName + ":\n" + inputConfig, e);
+        }
+      }
+
+      private void handleLogLevelFilterChange(Type eventType, String nodeName, String nodeData) {
+        switch (eventType) {
+          case NODE_ADDED:
+          case NODE_UPDATED:
+            LOG.info("Node added/updated under loglevelfilter ZK node: " + nodeName);
+            LogLevelFilter logLevelFilter = gson.fromJson(nodeData, LogLevelFilter.class);
+            logLevelFilterMonitor.setLogLevelFilter(nodeName, logLevelFilter);
+            break;
+          case NODE_REMOVED:
+            LOG.info("Node removed loglevelfilter input ZK node: " + nodeName);
+            logLevelFilterMonitor.removeLogLevelFilter(nodeName);
+            break;
+          default:
+            break;
+        }
+      }
+
+      private void merge(JsonObject source, JsonObject target) {
+        for (Map.Entry<String, JsonElement> e : source.entrySet()) {
+          if (!target.has(e.getKey())) {
+            target.add(e.getKey(), e.getValue());
+          } else {
+            if (e.getValue().isJsonObject()) {
+              JsonObject valueJson = (JsonObject)e.getValue();
+              merge(valueJson, target.get(e.getKey()).getAsJsonObject());
+            }
+          }
+        }
+      }
+    };
+    cache.getListenable().addListener(listener);
+    cache.start();
+  }
+
+  private void createGlobalConfigNode(JsonArray globalConfigNode) {
+    String globalConfigNodePath = String.format("%s/%s/global", root, properties.get(CLUSTER_NAME_PROPERTY));
+    String data = InputConfigGson.gson.toJson(globalConfigNode);
+    
+    try {
+      if (cache.getCurrentData(globalConfigNodePath) != null) {
+        client.setData().forPath(globalConfigNodePath, data.getBytes());
+      } else {
+        client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(globalConfigNodePath, data.getBytes());
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception during global config node creation/update", e);
+    }
+  }
+
+  @Override
+  public List<String> getServices(String clusterName) {
+    String parentPath = String.format("%s/%s/input", root, clusterName);
+    Map<String, ChildData> serviceNodes = cache.getCurrentChildren(parentPath);
+    return new ArrayList<String>(serviceNodes.keySet());
+  }
+
+  @Override
+  public InputConfig getInputConfig(String clusterName, String serviceName) {
+    String globalConfigNodePath = String.format("%s/%s/global", root, clusterName);
+    String globalConfigData = new String(cache.getCurrentData(globalConfigNodePath).getData());
+    JsonArray globalConfigs = (JsonArray) new JsonParser().parse(globalConfigData);
+    InputAdapter.setGlobalConfigs(globalConfigs);
+    
+    ChildData childData = cache.getCurrentData(String.format("%s/%s/input/%s", root, clusterName, serviceName));
+    return childData == null ? null : InputConfigGson.gson.fromJson(new String(childData.getData()), InputConfigImpl.class);
+  }
+
+  @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) throws Exception {
+    String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, logId);
+    String logLevelFilterJson = gson.toJson(filter);
+    try {
+      client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, logLevelFilterJson.getBytes());
+      LOG.info("Uploaded log level filter for the log " + logId + " for cluster " + clusterName);
+    } catch (NodeExistsException e) {
+      LOG.debug("Did not upload log level filters for log " + logId + " as it was already uploaded by another Log Feeder");
+    }
+  }
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {
+    for (Map.Entry<String, LogLevelFilter> e : filters.getFilter().entrySet()) {
+      String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, e.getKey());
+      String logLevelFilterJson = gson.toJson(e.getValue());
+      String currentLogLevelFilterJson = new String(cache.getCurrentData(nodePath).getData());
+      if (!logLevelFilterJson.equals(currentLogLevelFilterJson)) {
+        client.setData().forPath(nodePath, logLevelFilterJson.getBytes());
+        LOG.info("Set log level filter for the log " + e.getKey() + " for cluster " + clusterName);
+      }
+    }
+  }
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    String parentPath = String.format("%s/%s/loglevelfilter", root, clusterName);
+    Map<String, ChildData> logLevelFilterNodes = cache.getCurrentChildren(parentPath);
+    TreeMap<String, LogLevelFilter> filters = new TreeMap<>();
+    for (Map.Entry<String, ChildData> e : logLevelFilterNodes.entrySet()) {
+      LogLevelFilter logLevelFilter = gson.fromJson(new String(e.getValue().getData()), LogLevelFilter.class);
+      filters.put(e.getKey(), logLevelFilter);
+    }
+    
+    LogLevelFilterMap logLevelFilters = new LogLevelFilterMap();
+    logLevelFilters.setFilter(filters);
+    return logLevelFilters;
+  }
+
+  private List<ACL> getAcls() {
+    String aclStr = properties.get(ZK_ACLS_PROPERTY);
+    if (StringUtils.isBlank(aclStr)) {
+      return ZooDefs.Ids.OPEN_ACL_UNSAFE;
+    }
+
+    List<ACL> acls = new ArrayList<>();
+    List<String> aclStrList = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(aclStr);
+    for (String unparsedAcl : aclStrList) {
+      String[] parts = unparsedAcl.split(":");
+      if (parts.length == 3) {
+        acls.add(new ACL(parsePermission(parts[2]), new Id(parts[0], parts[1])));
+      }
+    }
+    return acls;
+  }
+
+  private Integer parsePermission(String permission) {
+    int permissionCode = 0;
+    for (char each : permission.toLowerCase().toCharArray()) {
+      switch (each) {
+        case 'r':
+          permissionCode |= ZooDefs.Perms.READ;
+          break;
+        case 'w':
+          permissionCode |= ZooDefs.Perms.WRITE;
+          break;
+        case 'c':
+          permissionCode |= ZooDefs.Perms.CREATE;
+          break;
+        case 'd':
+          permissionCode |= ZooDefs.Perms.DELETE;
+          break;
+        case 'a':
+          permissionCode |= ZooDefs.Perms.ADMIN;
+          break;
+        default:
+          throw new IllegalArgumentException("Unsupported permission: " + permission);
+      }
+    }
+    return permissionCode;
+  }
+
+  @Override
+  public void close() {
+    LOG.info("Closing ZooKeeper Connection");
+    client.close();
+  }
+}
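
LogSearchConfigZK keeps all state under <zk_root>/<cluster>/: input configs at .../input/<service>, log level filters at .../loglevelfilter/<logId>, and the merged global settings at .../global. A server-side bootstrap sketch follows; the connect string, cluster name, and service name are placeholder values, and the ACL string uses the scheme:id:perms format with the r/w/c/d/a permission characters that getAcls() and parsePermission() above accept:

import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;

class ZkBootstrapSketch {
  static void bootstrap() throws Exception {
    Map<String, String> props = new HashMap<>();
    props.put("logsearch.config.zk_connect_string", "zk1:2181,zk2:2181");  // placeholder hosts
    props.put("cluster.name", "cl1");                                      // placeholder cluster
    props.put("logsearch.config.zk_root", "/logsearch");
    props.put("logsearch.config.zk_acls", "world:anyone:cdrwa");           // scheme:id:perms

    LogSearchConfigZK config = new LogSearchConfigZK();
    config.init(Component.SERVER, props);

    String inputConfigJson = "{}";  // placeholder; real content is the service's input config JSON
    if (!config.inputConfigExists("cl1", "hdfs_namenode")) {
      config.createInputConfig("cl1", "hdfs_namenode", inputConfigJson);
    }
    config.close();
  }
}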
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
new file mode 100644
index 0000000..8bbff8f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/ConditionsImpl.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+
+import com.google.gson.annotations.Expose;
+
+public class ConditionsImpl implements Conditions {
+  @Expose
+  private FieldsImpl fields;
+
+  public FieldsImpl getFields() {
+    return fields;
+  }
+
+  public void setFields(FieldsImpl fields) {
+    this.fields = fields;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
new file mode 100644
index 0000000..68cd0e2
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FieldsImpl.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.Set;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+
+import com.google.gson.annotations.Expose;
+
+public class FieldsImpl implements Fields {
+  @Expose
+  private Set<String> type;
+
+  public Set<String> getType() {
+    return type;
+  }
+
+  public void setType(Set<String> type) {
+    this.type = type;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java
new file mode 100644
index 0000000..b84403b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterAdapter.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+
+public class FilterAdapter implements JsonDeserializer<FilterDescriptorImpl> {
+  @Override
+  public FilterDescriptorImpl deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    String filterType = json.getAsJsonObject().get("filter").getAsString();
+    switch (filterType) {
+      case "grok":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterGrokDescriptorImpl.class);
+      case "keyvalue":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterKeyValueDescriptorImpl.class);
+      case "json":
+        return (FilterDescriptorImpl)context.deserialize(json, FilterJsonDescriptorImpl.class);
+      default:
+        throw new IllegalArgumentException("Unknown filter type: " + filterType);
+    }
+  }
+}
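
A minimal usage sketch for the adapter above — the wrapper class, sample JSON, and field values are illustrative (not part of this patch), and the snippet is assumed to live in the same `inputconfig.impl` package so the descriptor classes resolve. A custom deserializer is needed here because `FilterDescriptorImpl` is abstract, so Gson cannot instantiate it reflectively:

```java
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

public class FilterAdapterSketch {
  public static void main(String[] args) {
    // Register the adapter for the abstract base type; the "filter" field selects the subclass.
    Gson gson = new GsonBuilder()
        .registerTypeAdapter(FilterDescriptorImpl.class, new FilterAdapter())
        .excludeFieldsWithoutExposeAnnotation()
        .create();

    String json = "{\"filter\":\"grok\",\"source_field\":\"log_message\","
        + "\"message_pattern\":\"%{TIMESTAMP_ISO8601:logtime}\"}";

    FilterDescriptorImpl descriptor = gson.fromJson(json, FilterDescriptorImpl.class);
    System.out.println(descriptor instanceof FilterGrokDescriptorImpl);              // true
    System.out.println(((FilterGrokDescriptorImpl) descriptor).getMessagePattern()); // %{TIMESTAMP_ISO8601:logtime}
  }
}
```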
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
new file mode 100644
index 0000000..4e11715
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterDescriptorImpl.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public abstract class FilterDescriptorImpl implements FilterDescriptor {
+  @Expose
+  private String filter;
+
+  @Expose
+  private ConditionsImpl conditions;
+
+  @Expose
+  @SerializedName("sort_order")
+  private Integer sortOrder;
+
+  @Expose
+  @SerializedName("source_field")
+  private String sourceField;
+
+  @Expose
+  @SerializedName("remove_source_field")
+  private Boolean removeSourceField;
+
+  @Expose
+  @SerializedName("post_map_values")
+  private Map<String, List<PostMapValuesImpl>> postMapValues;
+
+  @Expose
+  @SerializedName("is_enabled")
+  private Boolean isEnabled;
+
+  public String getFilter() {
+    return filter;
+  }
+
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  public ConditionsImpl getConditions() {
+    return conditions;
+  }
+
+  public void setConditions(ConditionsImpl conditions) {
+    this.conditions = conditions;
+  }
+
+  public Integer getSortOrder() {
+    return sortOrder;
+  }
+
+  public void setSortOrder(Integer sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  public String getSourceField() {
+    return sourceField;
+  }
+
+  public void setSourceField(String sourceField) {
+    this.sourceField = sourceField;
+  }
+
+  public Boolean isRemoveSourceField() {
+    return removeSourceField;
+  }
+
+  public void setRemoveSourceField(Boolean removeSourceField) {
+    this.removeSourceField = removeSourceField;
+  }
+
+  public Map<String, ? extends List<? extends PostMapValues>> getPostMapValues() {
+    return postMapValues;
+  }
+
+  public void setPostMapValues(Map<String, List<PostMapValuesImpl>> postMapValues) {
+    this.postMapValues = postMapValues;
+  }
+
+  public Boolean isEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
new file mode 100644
index 0000000..7f40b7f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterGrokDescriptorImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class FilterGrokDescriptorImpl extends FilterDescriptorImpl implements FilterGrokDescriptor {
+  @Expose
+  @SerializedName("log4j_format")
+  private String log4jFormat;
+
+  @Expose
+  @SerializedName("multiline_pattern")
+  private String multilinePattern;
+
+  @Expose
+  @SerializedName("message_pattern")
+  private String messagePattern;
+
+  @Override
+  public String getLog4jFormat() {
+    return log4jFormat;
+  }
+
+  public void setLog4jFormat(String log4jFormat) {
+    this.log4jFormat = log4jFormat;
+  }
+
+  @Override
+  public String getMultilinePattern() {
+    return multilinePattern;
+  }
+
+  public void setMultilinePattern(String multilinePattern) {
+    this.multilinePattern = multilinePattern;
+  }
+
+  @Override
+  public String getMessagePattern() {
+    return messagePattern;
+  }
+
+  public void setMessagePattern(String messagePattern) {
+    this.messagePattern = messagePattern;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java
new file mode 100644
index 0000000..9bf1a2b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterJsonDescriptorImpl.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterJsonDescriptor;
+
+public class FilterJsonDescriptorImpl extends FilterDescriptorImpl implements FilterJsonDescriptor {
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
new file mode 100644
index 0000000..8e89990
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/FilterKeyValueDescriptorImpl.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class FilterKeyValueDescriptorImpl extends FilterDescriptorImpl implements FilterKeyValueDescriptor {
+  @Expose
+  @SerializedName("field_split")
+  private String fieldSplit;
+
+  @Expose
+  @SerializedName("value_split")
+  private String valueSplit;
+
+  @Expose
+  @SerializedName("value_borders")
+  private String valueBorders;
+
+  public String getFieldSplit() {
+    return fieldSplit;
+  }
+
+  public void setFieldSplit(String fieldSplit) {
+    this.fieldSplit = fieldSplit;
+  }
+
+  public String getValueSplit() {
+    return valueSplit;
+  }
+
+  public void setValueSplit(String valueSplit) {
+    this.valueSplit = valueSplit;
+  }
+
+  public String getValueBorders() {
+    return valueBorders;
+  }
+
+  public void setValueBorders(String valueBorders) {
+    this.valueBorders = valueBorders;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java
new file mode 100644
index 0000000..86741c6
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputAdapter.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+
+public class InputAdapter implements JsonDeserializer<InputDescriptorImpl> {
+  private static JsonArray globalConfigs;
+  public static void setGlobalConfigs(JsonArray configs) {
+    globalConfigs = configs;
+  }
+
+  @Override
+  public InputDescriptorImpl deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    String source = null;
+    if (json.getAsJsonObject().has("source")) {
+      source = json.getAsJsonObject().get("source").getAsString();
+    } else {
+      for (JsonElement e : globalConfigs) {
+        if (e.getAsJsonObject().has("source")) {
+          source = e.getAsJsonObject().get("source").getAsString();
+          break;
+        }
+      }
+    }
+
+    if (source == null) {
+      throw new IllegalArgumentException("Input type ('source') is specified neither in the input nor in the global configs");
+    }
+
+    switch (source) {
+      case "file":
+        return (InputDescriptorImpl)context.deserialize(json, InputFileDescriptorImpl.class);
+      case "s3_file":
+        return (InputDescriptorImpl)context.deserialize(json, InputS3FileDescriptorImpl.class);
+      default:
+        throw new IllegalArgumentException("Unknown input type: " + source);
+    }
+  }
+}
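
The global-config fallback above can be exercised as in the following hypothetical snippet (same same-package assumption as before; the JSON content is invented):

```java
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonParser;

public class InputAdapterSketch {
  public static void main(String[] args) {
    // Global configs supply "source" when an input entry omits it.
    JsonArray globals = new JsonParser().parse("[{\"source\":\"file\"}]").getAsJsonArray();
    InputAdapter.setGlobalConfigs(globals);

    Gson gson = new GsonBuilder()
        .registerTypeAdapter(InputDescriptorImpl.class, new InputAdapter())
        .excludeFieldsWithoutExposeAnnotation()
        .create();

    // No "source" here, so the adapter falls back to the globals and picks the file descriptor.
    InputDescriptorImpl input = gson.fromJson(
        "{\"type\":\"sample_service\",\"path\":\"/var/log/sample/sample.log\"}",
        InputDescriptorImpl.class);
    System.out.println(input instanceof InputFileDescriptorImpl);  // true
  }
}
```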
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java
new file mode 100644
index 0000000..3b78aff
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigGson.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+import java.util.List;
+
+import com.google.common.reflect.TypeToken;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * Helper class to convert between JSON strings and InputConfig objects.
+ */
+public class InputConfigGson {
+  public static final Gson gson;
+  static {
+    Type inputType = new TypeToken<InputDescriptorImpl>() {}.getType();
+    Type filterType = new TypeToken<FilterDescriptorImpl>() {}.getType();
+    Type postMapValuesType = new TypeToken<List<PostMapValuesImpl>>() {}.getType();
+    gson = new GsonBuilder()
+        .registerTypeAdapter(inputType, new InputAdapter())
+        .registerTypeAdapter(filterType, new FilterAdapter())
+        .registerTypeAdapter(postMapValuesType, new PostMapValuesAdapter())
+        .setPrettyPrinting()
+        .excludeFieldsWithoutExposeAnnotation()
+        .create();
+  }
+}
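
A sketch of the intended use of `InputConfigGson.gson` (the config snippet and its values are illustrative; the class is assumed to sit in the same package):

```java
public class InputConfigGsonSketch {
  public static void main(String[] args) {
    // "source" selects the input subclass, "filter" selects the filter subclass.
    String json = "{"
        + "\"input\":[{\"type\":\"sample_service\",\"rowtype\":\"service\","
        + "\"source\":\"file\",\"path\":\"/var/log/sample/sample.log\"}],"
        + "\"filter\":[{\"filter\":\"json\",\"source_field\":\"log_message\"}]"
        + "}";

    InputConfigImpl config = InputConfigGson.gson.fromJson(json, InputConfigImpl.class);

    InputDescriptorImpl input = (InputDescriptorImpl) config.getInput().get(0);
    System.out.println(input.getPath());                                               // /var/log/sample/sample.log
    System.out.println(config.getFilter().get(0) instanceof FilterJsonDescriptorImpl); // true
  }
}
```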
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
new file mode 100644
index 0000000..a4eba8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputConfigImpl.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.google.gson.annotations.Expose;
+
+public class InputConfigImpl implements InputConfig {
+  @Expose
+  private List<InputDescriptorImpl> input;
+
+  @Expose
+  private List<FilterDescriptorImpl> filter;
+
+  @Override
+  public List<? extends InputDescriptor> getInput() {
+    return input;
+  }
+
+  public void setInput(List<InputDescriptorImpl> input) {
+    this.input = input;
+  }
+
+  @Override
+  public List<? extends FilterDescriptor> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(List<FilterDescriptorImpl> filter) {
+    this.filter = filter;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
new file mode 100644
index 0000000..94dcc2a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputDescriptorImpl.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public abstract class InputDescriptorImpl implements InputDescriptor {
+  @Expose
+  private String type;
+
+  @Expose
+  private String rowtype;
+
+  @Expose
+  private String path;
+
+  @Expose
+  @SerializedName("add_fields")
+  private Map<String, String> addFields;
+  
+  @Expose
+  private String source;
+  
+  @Expose
+  private Boolean tail;
+  
+  @Expose
+  @SerializedName("gen_event_md5")
+  private Boolean genEventMd5;
+  
+  @Expose
+  @SerializedName("use_event_md5_as_id")
+  private Boolean useEventMd5AsId;
+  
+  @Expose
+  @SerializedName("start_position")
+  private String startPosition;
+
+  @Expose
+  @SerializedName("cache_enabled")
+  private Boolean cacheEnabled;
+
+  @Expose
+  @SerializedName("cache_key_field")
+  private String cacheKeyField;
+
+  @Expose
+  @SerializedName("cache_last_dedup_enabled")
+  private Boolean cacheLastDedupEnabled;
+
+  @Expose
+  @SerializedName("cache_size")
+  private Integer cacheSize;
+
+  @Expose
+  @SerializedName("cache_dedup_interval")
+  private Long cacheDedupInterval;
+
+  @Expose
+  @SerializedName("is_enabled")
+  private Boolean isEnabled;
+
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public String getRowtype() {
+    return rowtype;
+  }
+
+  public void setRowtype(String rowType) {
+    this.rowtype = rowType;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public void setPath(String path) {
+    this.path = path;
+  }
+
+  public Map<String, String> getAddFields() {
+    return addFields;
+  }
+
+  public void setAddFields(Map<String, String> addFields) {
+    this.addFields = addFields;
+  }
+
+  public String getSource() {
+    return source;
+  }
+
+  public void setSource(String source) {
+    this.source = source;
+  }
+
+  public Boolean isTail() {
+    return tail;
+  }
+
+  public void setTail(Boolean tail) {
+    this.tail = tail;
+  }
+
+  public Boolean isGenEventMd5() {
+    return genEventMd5;
+  }
+
+  public void setGenEventMd5(Boolean genEventMd5) {
+    this.genEventMd5 = genEventMd5;
+  }
+
+  public Boolean isUseEventMd5AsId() {
+    return useEventMd5AsId;
+  }
+
+  public void setUseEventMd5AsId(Boolean useEventMd5AsId) {
+    this.useEventMd5AsId = useEventMd5AsId;
+  }
+
+  public String getStartPosition() {
+    return startPosition;
+  }
+
+  public void setStartPosition(String startPosition) {
+    this.startPosition = startPosition;
+  }
+
+  public Boolean isCacheEnabled() {
+    return cacheEnabled;
+  }
+
+  public void setCacheEnabled(Boolean cacheEnabled) {
+    this.cacheEnabled = cacheEnabled;
+  }
+
+  public String getCacheKeyField() {
+    return cacheKeyField;
+  }
+
+  public void setCacheKeyField(String cacheKeyField) {
+    this.cacheKeyField = cacheKeyField;
+  }
+
+  public Boolean getCacheLastDedupEnabled() {
+    return cacheLastDedupEnabled;
+  }
+
+  public void setCacheLastDedupEnabled(Boolean cacheLastDedupEnabled) {
+    this.cacheLastDedupEnabled = cacheLastDedupEnabled;
+  }
+
+  public Integer getCacheSize() {
+    return cacheSize;
+  }
+
+  public void setCacheSize(Integer cacheSize) {
+    this.cacheSize = cacheSize;
+  }
+
+  public Long getCacheDedupInterval() {
+    return cacheDedupInterval;
+  }
+
+  public void setCacheDedupInterval(Long cacheDedupInterval) {
+    this.cacheDedupInterval = cacheDedupInterval;
+  }
+
+  public Boolean isEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
new file mode 100644
index 0000000..51c7ec8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileBaseDescriptorImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class InputFileBaseDescriptorImpl extends InputDescriptorImpl implements InputFileBaseDescriptor {
+  @Expose
+  @SerializedName("checkpoint_interval_ms")
+  private Integer checkpointIntervalMs;
+
+  @Expose
+  @SerializedName("process_file")
+  private Boolean processFile;
+
+  @Expose
+  @SerializedName("copy_file")
+  private Boolean copyFile;
+
+  @Override
+  public Boolean getProcessFile() {
+    return processFile;
+  }
+
+  public void setProcessFile(Boolean processFile) {
+    this.processFile = processFile;
+  }
+
+  @Override
+  public Boolean getCopyFile() {
+    return copyFile;
+  }
+
+  public void setCopyFile(Boolean copyFile) {
+    this.copyFile = copyFile;
+  }
+
+  @Override
+  public Integer getCheckpointIntervalMs() {
+    return checkpointIntervalMs;
+  }
+
+  public void setCheckpointIntervalMs(Integer checkpointIntervalMs) {
+    this.checkpointIntervalMs = checkpointIntervalMs;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java
new file mode 100644
index 0000000..3bfd161
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputFileDescriptorImpl.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileDescriptor;
+
+public class InputFileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputFileDescriptor {
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
new file mode 100644
index 0000000..277a57c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/InputS3FileDescriptorImpl.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class InputS3FileDescriptorImpl extends InputFileBaseDescriptorImpl implements InputS3FileDescriptor {
+  @Expose
+  @SerializedName("s3_access_key")
+  private String s3AccessKey;
+
+  @Expose
+  @SerializedName("s3_secret_key")
+  private String s3SecretKey;
+
+  @Override
+  public String getS3AccessKey() {
+    return s3AccessKey;
+  }
+
+  public void setS3AccessKey(String s3AccessKey) {
+    this.s3AccessKey = s3AccessKey;
+  }
+
+  @Override
+  public String getS3SecretKey() {
+    return s3SecretKey;
+  }
+
+  public void setS3SecretKey(String s3SecretKey) {
+    this.s3SecretKey = s3SecretKey;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
new file mode 100644
index 0000000..9daad2b
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapDateDescriptorImpl.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapDateDescriptorImpl implements MapDateDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_date";
+  }
+
+  @Expose
+  @SerializedName("source_date_pattern")
+  private String sourceDatePattern;
+
+  @Expose
+  @SerializedName("target_date_pattern")
+  private String targetDatePattern;
+
+  @Override
+  public String getSourceDatePattern() {
+    return sourceDatePattern;
+  }
+
+  public void setSourceDatePattern(String sourceDatePattern) {
+    this.sourceDatePattern = sourceDatePattern;
+  }
+
+  @Override
+  public String getTargetDatePattern() {
+    return targetDatePattern;
+  }
+
+  public void setTargetDatePattern(String targetDatePattern) {
+    this.targetDatePattern = targetDatePattern;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
new file mode 100644
index 0000000..4a8d746
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldCopyDescriptorImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldCopyDescriptorImpl implements MapFieldCopyDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldcopy";
+  }
+
+  @Expose
+  @SerializedName("copy_name")
+  private String copyName;
+
+  @Override
+  public String getCopyName() {
+    return copyName;
+  }
+
+  public void setCopyName(String copyName) {
+    this.copyName = copyName;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
new file mode 100644
index 0000000..333cb67
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldNameDescriptorImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldNameDescriptorImpl implements MapFieldNameDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldname";
+  }
+
+  @Expose
+  @SerializedName("new_fieldname")
+  private String newFieldName;
+
+  @Override
+  public String getNewFieldName() {
+    return newFieldName;
+  }
+
+  public void setNewFieldName(String newFieldName) {
+    this.newFieldName = newFieldName;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
new file mode 100644
index 0000000..599e152
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/MapFieldValueDescriptorImpl.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+
+import com.google.gson.annotations.Expose;
+import com.google.gson.annotations.SerializedName;
+
+public class MapFieldValueDescriptorImpl implements MapFieldValueDescriptor {
+  @Override
+  public String getJsonName() {
+    return "map_fieldvalue";
+  }
+
+  @Expose
+  @SerializedName("pre_value")
+  private String preValue;
+
+  @Expose
+  @SerializedName("post_value")
+  private String postValue;
+
+  @Override
+  public String getPreValue() {
+    return preValue;
+  }
+
+  public void setPreValue(String preValue) {
+    this.preValue = preValue;
+  }
+
+  @Override
+  public String getPostValue() {
+    return postValue;
+  }
+
+  public void setPostValue(String postValue) {
+    this.postValue = postValue;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
new file mode 100644
index 0000000..32aded8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesAdapter.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+public class PostMapValuesAdapter implements JsonDeserializer<List<PostMapValuesImpl>>, JsonSerializer<List<PostMapValuesImpl>> {
+  private static final Logger LOG = LoggerFactory.getLogger(PostMapValuesAdapter.class);
+
+  @Override
+  public List<PostMapValuesImpl> deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) {
+    List<PostMapValuesImpl> vals = new ArrayList<>();
+    if (json.isJsonArray()) {
+      for (JsonElement e : json.getAsJsonArray()) {
+        vals.add(createPostMapValues(e, context));
+      }
+    } else if (json.isJsonObject()) {
+      vals.add(createPostMapValues(json, context));
+    } else {
+      throw new IllegalArgumentException("Unexpected JSON type for post_map_values: " + json.getClass());
+    }
+    return vals;
+  }
+
+  private PostMapValuesImpl createPostMapValues(JsonElement e, JsonDeserializationContext context) {
+    List<MapFieldDescriptor> mappers = new ArrayList<>();
+    for (Map.Entry<String, JsonElement> m : e.getAsJsonObject().entrySet()) {
+      switch (m.getKey()) {
+        case "map_date":
+          mappers.add((MapDateDescriptorImpl)context.deserialize(m.getValue(), MapDateDescriptorImpl.class));
+          break;
+        case "map_fieldcopy":
+          mappers.add((MapFieldCopyDescriptorImpl)context.deserialize(m.getValue(), MapFieldCopyDescriptorImpl.class));
+          break;
+        case "map_fieldname":
+          mappers.add((MapFieldNameDescriptorImpl)context.deserialize(m.getValue(), MapFieldNameDescriptorImpl.class));
+          break;
+        case "map_fieldvalue":
+          mappers.add((MapFieldValueDescriptorImpl)context.deserialize(m.getValue(), MapFieldValueDescriptorImpl.class));
+          break;
+        default:
+          LOG.warn("Unknown mapper type: {}", m.getKey());
+      }
+    }
+    
+    PostMapValuesImpl postMapValues = new PostMapValuesImpl();
+    postMapValues.setMappers(mappers);
+    return postMapValues;
+  }
+
+  @Override
+  public JsonElement serialize(List<PostMapValuesImpl> src, Type typeOfSrc, JsonSerializationContext context) {
+    if (src.size() == 1) {
+      return createMapperObject(src.get(0), context);
+    } else {
+      JsonArray jsonArray = new JsonArray();
+      for (PostMapValuesImpl postMapValues : src) {
+        jsonArray.add(createMapperObject(postMapValues, context));
+      }
+      return jsonArray;
+    }
+  }
+
+  private JsonElement createMapperObject(PostMapValuesImpl postMapValues, JsonSerializationContext context) {
+    JsonObject jsonObject = new JsonObject();
+    for (MapFieldDescriptor m : postMapValues.getMappers()) {
+      jsonObject.add(m.getJsonName(), context.serialize(m));
+    }
+    return jsonObject;
+  }
+}
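
For reference, a hedged sketch of the two `post_map_values` shapes this adapter accepts — a single object or an array — relying on the `List<PostMapValuesImpl>` registration in `InputConfigGson` (patterns and field names below are invented):

```java
import java.lang.reflect.Type;
import java.util.List;

import com.google.gson.reflect.TypeToken;

public class PostMapValuesSketch {
  public static void main(String[] args) {
    Type listType = new TypeToken<List<PostMapValuesImpl>>() {}.getType();

    // Single-object form...
    List<PostMapValuesImpl> single = InputConfigGson.gson.fromJson(
        "{\"map_date\":{\"source_date_pattern\":\"yyyy-MM-dd HH:mm:ss\","
            + "\"target_date_pattern\":\"yyyy-MM-dd'T'HH:mm:ss.SSS\"}}", listType);

    // ...and array form both deserialize to a list of PostMapValuesImpl.
    List<PostMapValuesImpl> many = InputConfigGson.gson.fromJson(
        "[{\"map_fieldname\":{\"new_fieldname\":\"log_message\"}},"
            + "{\"map_fieldcopy\":{\"copy_name\":\"raw_message\"}}]", listType);

    MapDateDescriptorImpl mapper = (MapDateDescriptorImpl) single.get(0).getMappers().get(0);
    System.out.println(mapper.getSourceDatePattern());            // yyyy-MM-dd HH:mm:ss
    System.out.println(many.size());                              // 2

    // Serializing a one-element list goes back to the single-object form.
    System.out.println(InputConfigGson.gson.toJson(single, listType));
  }
}
```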
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java
new file mode 100644
index 0000000..4d2254a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/model/inputconfig/impl/PostMapValuesImpl.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.google.gson.annotations.Expose;
+
+public class PostMapValuesImpl implements PostMapValues {
+  @Expose
+  private List<MapFieldDescriptor> mappers;
+
+  public List<MapFieldDescriptor> getMappers() {
+    return mappers;
+  }
+
+  public void setMappers(List<MapFieldDescriptor> mappers) {
+    this.mappers = mappers;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-it/pom.xml b/ambari-logsearch/ambari-logsearch-it/pom.xml
index be7ab57..81af9e8 100644
--- a/ambari-logsearch/ambari-logsearch-it/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-it/pom.xml
@@ -33,8 +33,14 @@
   <properties>
     <it.skip>true</it.skip>
     <jbehave.version>4.0.5</jbehave.version>
+    <jbehave-selenium.version>3.5.5</jbehave-selenium.version>
     <jersey.version>2.23.1</jersey.version>
     <jackson-jaxrs.version>2.6.4</jackson-jaxrs.version>
+    <failsafe-plugin.version>2.20</failsafe-plugin.version>
+    <forkCount>1</forkCount>
+    <docker.host>localhost</docker.host>
+    <backend.stories.location>NONE</backend.stories.location>
+    <ui.stories.location>NONE</ui.stories.location>
   </properties>
 
   <dependencies>
@@ -44,6 +50,11 @@
       <version>${jbehave.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.jbehave.web</groupId>
+      <artifactId>jbehave-web-selenium</artifactId>
+      <version>${jbehave-selenium.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.solr</groupId>
       <artifactId>solr-solrj</artifactId>
       <version>${solr.version}</version>
@@ -58,11 +69,6 @@
       <version>2.5</version>
     </dependency>
     <dependency>
-      <groupId>com.github.docker-java</groupId>
-      <artifactId>docker-java</artifactId>
-      <version>3.0.0</version>
-    </dependency>
-    <dependency>
       <groupId>com.fasterxml.jackson.jaxrs</groupId>
       <artifactId>jackson-jaxrs-json-provider</artifactId>
       <version>${jackson-jaxrs.version}</version>
@@ -86,6 +92,32 @@
       <groupId>com.flipkart.zjsonpatch</groupId>
       <artifactId>zjsonpatch</artifactId>
       <version>0.2.4</version>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-server</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-web</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-logfeeder</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>11.0.1</version>
     </dependency>
   </dependencies>
 
@@ -102,35 +134,139 @@
         <directory>src/test/resources</directory>
       </testResource>
     </testResources>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>run-integration-tests</id>
-            <phase>integration-test</phase>
-            <goals>
-              <goal>integration-test</goal>
-            </goals>
-            <configuration>
-              <includes>
-                <include>**/*Stories.java</include>
-                <include>**/*Story.java</include>
-              </includes>
-              <skip>${it.skip}</skip>
-            </configuration>
-          </execution>
-          <execution>
-            <id>verify-integration-tests</id>
-            <phase>verify</phase>
-            <goals>
-              <goal>verify</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
   </build>
 
+  <profiles>
+    <profile>
+      <id>selenium-tests</id>
+      <activation>
+        <property>
+          <name>selenium-tests</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <version>${failsafe-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>run-integration-tests</id>
+                <phase>integration-test</phase>
+                <goals>
+                  <goal>integration-test</goal>
+                </goals>
+                <configuration>
+                  <includes>
+                    <include>**/*UIStories.java</include>
+                  </includes>
+                  <systemPropertyVariables>
+                    <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
+                    <docker.host>${docker.host}</docker.host>
+                    <ui.stories.location>${ui.stories.location}</ui.stories.location>
+                  </systemPropertyVariables>
+                </configuration>
+              </execution>
+              <execution>
+                <id>verify-integration-tests</id>
+                <phase>verify</phase>
+                <goals>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>backend-tests</id>
+      <activation>
+        <property>
+          <name>backend-tests</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <version>${failsafe-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>run-integration-tests</id>
+                <phase>integration-test</phase>
+                <goals>
+                  <goal>integration-test</goal>
+                </goals>
+                <configuration>
+                  <includes>
+                    <include>**/*BackendStories.java</include>
+                  </includes>
+                  <systemPropertyVariables>
+                    <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
+                    <docker.host>${docker.host}</docker.host>
+                    <backend.stories.location>${backend.stories.location}</backend.stories.location>
+                  </systemPropertyVariables>
+                </configuration>
+              </execution>
+              <execution>
+                <id>verify-integration-tests</id>
+                <phase>verify</phase>
+                <goals>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>all-tests</id>
+      <activation>
+        <property>
+          <name>all-tests</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <version>${failsafe-plugin.version}</version>
+            <executions>
+              <execution>
+                <id>run-integration-tests</id>
+                <phase>integration-test</phase>
+                <goals>
+                  <goal>integration-test</goal>
+                </goals>
+                <configuration>
+                  <includes>
+                    <include>**/*Stories.java</include>
+                  </includes>
+                  <systemPropertyVariables>
+                    <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
+                    <docker.host>${docker.host}</docker.host>
+                    <backend.stories.location>${backend.stories.location}</backend.stories.location>
+                    <ui.stories.location>${ui.stories.location}</ui.stories.location>
+                  </systemPropertyVariables>
+                </configuration>
+              </execution>
+              <execution>
+                <id>verify-integration-tests</id>
+                <phase>verify</phase>
+                <goals>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
 </project>
\ No newline at end of file
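
The new profiles are property-activated; as we read the configuration above (the exact invocations are inferred from the profiles, not stated in the patch), backend stories would run with `mvn verify -Dbackend-tests -Dbackend.stories.location=<dir>`, Selenium UI stories with `mvn verify -Dselenium-tests -Dui.stories.location=<dir>`, and both suites with `mvn verify -Dall-tests`; `-Ddocker.host=<host>` overrides the `localhost` default.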
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/domain/StoryDataRegistry.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/domain/StoryDataRegistry.java
index 564972a..41d6391 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/domain/StoryDataRegistry.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/domain/StoryDataRegistry.java
@@ -18,24 +18,23 @@
  */
 package org.apache.ambari.logsearch.domain;
 
-import com.github.dockerjava.api.DockerClient;
-import com.github.dockerjava.core.DockerClientConfig;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.SolrClient;
+import org.jbehave.web.selenium.WebDriverProvider;
 
 public class StoryDataRegistry {
   public static final StoryDataRegistry INSTANCE = new StoryDataRegistry();
 
-  private DockerClient dockerClient;
-  private DockerClientConfig dockerClientConfig;
-  private CloudSolrClient cloudSolrClient;
+  private SolrClient solrClient;
   private boolean logsearchContainerStarted = false;
   private String dockerHost;
   private String ambariFolder;
+  private String shellScriptLocation;
   private final int solrPort = 8886;
   private final int logsearchPort = 61888;
   private final int zookeeperPort = 9983;
   private final String serviceLogsCollection = "hadoop_logs";
   private final String auditLogsCollection = "audit_logs";
+  private WebDriverProvider webDriverProvider;
 
   private StoryDataRegistry() {
   }
@@ -60,14 +59,6 @@
     return zookeeperPort;
   }
 
-  public DockerClient getDockerClient() {
-    return dockerClient;
-  }
-
-  public void setDockerClient(DockerClient dockerClient) {
-    this.dockerClient = dockerClient;
-  }
-
   public String getServiceLogsCollection() {
     return serviceLogsCollection;
   }
@@ -76,12 +67,12 @@
     return auditLogsCollection;
   }
 
-  public CloudSolrClient getCloudSolrClient() {
-    return cloudSolrClient;
+  public SolrClient getSolrClient() {
+    return solrClient;
   }
 
-  public void setCloudSolrClient(CloudSolrClient cloudSolrClient) {
-    this.cloudSolrClient = cloudSolrClient;
+  public void setSolrClient(SolrClient solrClient) {
+    this.solrClient = solrClient;
   }
 
   public String getAmbariFolder() {
@@ -92,12 +83,12 @@
     this.ambariFolder = ambariFolder;
   }
 
-  public DockerClientConfig getDockerClientConfig() {
-    return dockerClientConfig;
+  public String getShellScriptLocation() {
+    return shellScriptLocation;
   }
 
-  public void setDockerClientConfig(DockerClientConfig dockerClientConfig) {
-    this.dockerClientConfig = dockerClientConfig;
+  public void setShellScriptLocation(String shellScriptLocation) {
+    this.shellScriptLocation = shellScriptLocation;
   }
 
   public boolean isLogsearchContainerStarted() {
@@ -107,4 +98,12 @@
   public void setLogsearchContainerStarted(boolean logsearchContainerStarted) {
     this.logsearchContainerStarted = logsearchContainerStarted;
   }
+
+  public WebDriverProvider getWebDriverProvider() {
+    return webDriverProvider;
+  }
+
+  public void setWebDriverProvider(WebDriverProvider webDriverProvider) {
+    this.webDriverProvider = webDriverProvider;
+  }
 }
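
StoryDataRegistry is a process-wide singleton through which the docker, Solr and UI steps share state. A minimal usage sketch, assuming the solrj and jbehave-web-selenium dependencies of this module (the example class and the localhost URL are illustrative only):

import org.apache.ambari.logsearch.domain.StoryDataRegistry;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;

public class StoryDataRegistryExample {
  public static void main(String[] args) throws Exception {
    // One step publishes the client once the container is reachable...
    SolrClient client = new LBHttpSolrClient("http://localhost:8886/solr/hadoop_logs_shard0_replica1");
    StoryDataRegistry.INSTANCE.setSolrClient(client);
    // ...and every later step (e.g. SolrSteps) retrieves the same instance.
    SolrClient same = StoryDataRegistry.INSTANCE.getSolrClient();
    System.out.println(client == same); // prints: true
    client.close();
  }
}
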
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/AbstractLogSearchSteps.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/AbstractLogSearchSteps.java
new file mode 100644
index 0000000..a0027ae
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/AbstractLogSearchSteps.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.steps;
+
+import org.apache.ambari.logsearch.domain.StoryDataRegistry;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.SolrPingResponse;
+import org.apache.solr.common.SolrDocumentList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.URL;
+
+public class AbstractLogSearchSteps {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractLogSearchSteps.class);
+
+  protected void initDockerContainer() throws Exception {
+    boolean logsearchStarted = StoryDataRegistry.INSTANCE.isLogsearchContainerStarted();
+    if (!logsearchStarted) {
+      LOG.info("Create new docker container for Log Search ...");
+      URL location = LogSearchDockerSteps.class.getProtectionDomain().getCodeSource().getLocation();
+      String ambariFolder = new File(location.toURI()).getParentFile().getParentFile().getParentFile().getParent();
+      StoryDataRegistry.INSTANCE.setAmbariFolder(ambariFolder);
+      String shellScriptLocation = ambariFolder + "/ambari-logsearch/docker/logsearch-docker.sh";
+      StoryDataRegistry.INSTANCE.setShellScriptLocation(shellScriptLocation);
+      String output = runCommand(new String[]{StoryDataRegistry.INSTANCE.getShellScriptLocation(), "start"});
+      LOG.info("Command output: {}", output);
+      StoryDataRegistry.INSTANCE.setLogsearchContainerStarted(true);
+
+      String dockerHostFromUri = System.getProperty("docker.host") != null ? System.getProperty("docker.host") : "localhost";
+
+      StoryDataRegistry.INSTANCE.setDockerHost(dockerHostFromUri);
+      checkHostAndPortReachable(dockerHostFromUri, StoryDataRegistry.INSTANCE.getLogsearchPort(), "LogSearch");
+      waitUntilSolrIsUp();
+      waitUntilSolrHasAnyData();
+
+      LOG.info("Waiting for logfeeder to finish the test log parsings... (10 sec)");
+      Thread.sleep(10000);
+    }
+  }
+
+  private void waitUntilSolrIsUp() throws Exception {
+    int maxTries = 30;
+    boolean solrIsUp = false;
+    String lastExceptionMessage = null;
+    for (int tries = 1; tries <= maxTries; tries++) {
+      try {
+        SolrClient solrClient = new LBHttpSolrClient(String.format("http://%s:%d/solr/%s_shard0_replica1",
+          StoryDataRegistry.INSTANCE.getDockerHost(),
+          StoryDataRegistry.INSTANCE.getSolrPort(),
+          StoryDataRegistry.INSTANCE.getServiceLogsCollection()));
+        StoryDataRegistry.INSTANCE.setSolrClient(solrClient);
+        SolrPingResponse pingResponse = solrClient.ping();
+        if (pingResponse.getStatus() != 0) {
+          LOG.info("Solr is not up yet, Retrying... ({} tries)", tries);
+          Thread.sleep(2000);
+        } else {
+          solrIsUp = true;
+          LOG.info("Solr is up and running");
+          break;
+        }
+      } catch (Exception e) {
+        LOG.info("Error occurred during pinging solr. Retrying... ({} tries)", tries);
+        lastExceptionMessage = e.getMessage();
+        Thread.sleep(2000);
+      }
+    }
+
+    if (!solrIsUp) {
+      throw new IllegalStateException(String.format("Solr is not up after %d tries. Exception: %s", maxTries, lastExceptionMessage));
+    }
+  }
+
+  protected void waitUntilSolrHasAnyData() throws IOException, SolrServerException, InterruptedException {
+    boolean solrHasData = false;
+    int maxTries = 60;
+    String lastExceptionMessage = null;
+    for (int tries = 1; tries <= maxTries; tries++) {
+      try {
+        SolrClient solrClient = StoryDataRegistry.INSTANCE.getSolrClient();
+        SolrQuery solrQuery = new SolrQuery();
+        solrQuery.setQuery("*:*");
+        QueryResponse queryResponse = solrClient.query(solrQuery);
+        SolrDocumentList list = queryResponse.getResults();
+        if (list.size() > 0) {
+          solrHasData = true;
+          break;
+        } else {
+          Thread.sleep(2000);
+          LOG.info("Solr has no data yet. Retrying... ({} tries)", tries);
+        }
+      } catch (Exception e) {
+        LOG.info("Error occurred during checking solr. Retrying... ({} tries)", tries);
+        lastExceptionMessage = e.getMessage();
+        Thread.sleep(2000);
+      }
+    }
+    if (!solrHasData) {
+      throw new IllegalStateException(String.format("Solr has no data after %d tries. Exception: %s", maxTries, lastExceptionMessage));
+    }
+  }
+
+
+  protected void checkHostAndPortReachable(String host, int port, String serviceName) throws InterruptedException {
+    boolean reachable = false;
+    int maxTries = 60;
+    for (int tries = 1; tries <= maxTries; tries++) {
+      try (Socket socket = new Socket()) {
+        socket.connect(new InetSocketAddress(host, port), 1000);
+        reachable = true;
+        break;
+      } catch (IOException e) {
+        Thread.sleep(2000);
+        LOG.info("{} is not reachable yet. Retrying... ({} tries)", serviceName, tries);
+      }
+    }
+    if (!reachable) {
+      throw new IllegalStateException(String.format("%s is not reachable after %s tries", serviceName, maxTries));
+    }
+  }
+
+
+  protected String runCommand(String[] command) {
+    try {
+      LOG.info("Exec command: {}", StringUtils.join(command, " "));
+      Process process = Runtime.getRuntime().exec(command);
+      BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
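+      // Note: only the first line of the command's output is read and returned.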
+      return reader.readLine();
+    } catch (Exception e) {
+      throw new RuntimeException("Error during execute shell command: ", e);
+    }
+  }
+}
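
The three wait methods above share the same poll-and-retry shape (try, sleep 2 seconds, give up after maxTries with the last error). If they are ever consolidated, a generic helper could look like this sketch; the RetryUtil name and signature are assumptions, not part of this patch:

import java.util.concurrent.Callable;

final class RetryUtil {

  private RetryUtil() {
  }

  // Runs the check up to maxTries times, sleeping between attempts;
  // throws IllegalStateException if the check never succeeds.
  static void retry(Callable<Boolean> check, int maxTries, long sleepMillis, String description)
      throws InterruptedException {
    String lastError = null;
    for (int tries = 1; tries <= maxTries; tries++) {
      try {
        if (Boolean.TRUE.equals(check.call())) {
          return;
        }
      } catch (Exception e) {
        lastError = e.getMessage();
      }
      Thread.sleep(sleepMillis);
    }
    throw new IllegalStateException(
      String.format("%s did not succeed after %d tries. Last error: %s", description, maxTries, lastError));
  }
}
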
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
index 5f8f9bf..cb67fcc 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchDockerSteps.java
@@ -18,23 +18,13 @@
  */
 package org.apache.ambari.logsearch.steps;
 
-import com.github.dockerjava.api.DockerClient;
-import com.github.dockerjava.api.command.CreateContainerResponse;
-import com.github.dockerjava.api.model.Bind;
-import com.github.dockerjava.api.model.Container;
-import com.github.dockerjava.api.model.ExposedPort;
-import com.github.dockerjava.api.model.Ports;
-import com.github.dockerjava.api.model.Volume;
-import com.github.dockerjava.core.DockerClientBuilder;
-import com.github.dockerjava.core.DockerClientConfig;
-import com.github.dockerjava.core.command.BuildImageResultCallback;
-import com.google.common.base.Preconditions;
 import org.apache.ambari.logsearch.domain.StoryDataRegistry;
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.SolrPingResponse;
 import org.apache.solr.common.SolrDocumentList;
 import org.jbehave.core.annotations.AfterStories;
 import org.jbehave.core.annotations.BeforeStories;
@@ -43,202 +33,35 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.URL;
-import java.util.List;
 
-public class LogSearchDockerSteps {
+public class LogSearchDockerSteps extends AbstractLogSearchSteps {
 
   private static final Logger LOG = LoggerFactory.getLogger(LogSearchDockerSteps.class);
 
   @Given("logsearch docker container")
   public void setupLogSearchContainer() throws Exception {
-    boolean logsearchStarted = StoryDataRegistry.INSTANCE.isLogsearchContainerStarted();
-    if (!logsearchStarted) {
-      DockerClient dockerClient = StoryDataRegistry.INSTANCE.getDockerClient();
-      LOG.info("Create new docker container for Log Search ..");
-      URL location = LogSearchDockerSteps.class.getProtectionDomain().getCodeSource().getLocation();
-      String ambariFolder = new File(location.toURI()).getParentFile().getParentFile().getParentFile().getParent();
-      StoryDataRegistry.INSTANCE.setAmbariFolder(ambariFolder);
-      String dockerBaseDirectory = ambariFolder + "/ambari-logsearch/docker";
-      String dockerFileLocation = dockerBaseDirectory + "/Dockerfile";
-
-      String imageId = dockerClient.buildImageCmd()
-        .withTag("ambari-logsearch:v1.0")
-        .withBaseDirectory(new File(dockerBaseDirectory))
-        .withDockerfile(new File(dockerFileLocation))
-        .exec(new BuildImageResultCallback())
-        .awaitImageId();
-      LOG.info("Docker image id: {}", imageId);
-
-      removeLogSearchContainerIfExists();
-
-      // volume bindings
-      Volume testLogsVolume = new Volume("/root/test-logs");
-      Volume testConfigVolume = new Volume("/root/test-config");
-      Volume ambariVolume = new Volume("/root/ambari");
-      Volume logfeederClassesVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-logfeeder/target/package/classes");
-      Volume logsearchClassesVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-portal/target/package/classes");
-      Volume logsearchWebappVolume = new Volume("/root/ambari/ambari-logsearch/ambari-logsearch-portal/target/package/classes/webapps/app");
-      Bind testLogsBind = new Bind(ambariFolder +"/ambari-logsearch/docker/test-logs", testLogsVolume);
-      Bind testConfigBind = new Bind(ambariFolder +"/ambari-logsearch/docker/test-config", testConfigVolume);
-      Bind ambariRootBind = new Bind(ambariFolder, ambariVolume);
-      Bind logfeederClassesBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-logfeeder/target/classes", logfeederClassesVolume);
-      Bind logsearchClassesBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-portal/target/classes", logsearchClassesVolume);
-      Bind logsearchWebappBind = new Bind(ambariFolder + "/ambari-logsearch/ambari-logsearch-portal/src/main/webapp", logsearchWebappVolume);
-
-      // port bindings
-      Ports ports = new Ports();
-      ports.bind(new ExposedPort(5005), new Ports.Binding("0.0.0.0", "5005"));
-      ports.bind(new ExposedPort(5006), new Ports.Binding("0.0.0.0", "5006"));
-      ports.bind(new ExposedPort(StoryDataRegistry.INSTANCE.getSolrPort()), new Ports.Binding("0.0.0.0", "8886"));
-      ports.bind(new ExposedPort(StoryDataRegistry.INSTANCE.getLogsearchPort()), new Ports.Binding("0.0.0.0", "61888"));
-      ports.bind(new ExposedPort(StoryDataRegistry.INSTANCE.getZookeeperPort()), new Ports.Binding("0.0.0.0", "9983"));
-
-      LOG.info("Creating docker cointainer...");
-      CreateContainerResponse createResponse = dockerClient.createContainerCmd("ambari-logsearch:v1.0")
-        .withHostName("logsearch.apache.org")
-        .withName("logsearch")
-        .withVolumes(testLogsVolume, testConfigVolume, ambariVolume, logfeederClassesVolume, logsearchClassesVolume, logsearchWebappVolume)
-        .withBinds(testLogsBind, testConfigBind, ambariRootBind, logfeederClassesBind, logsearchClassesBind, logsearchWebappBind)
-        .withExposedPorts(
-          new ExposedPort(StoryDataRegistry.INSTANCE.getLogsearchPort()),
-          new ExposedPort(5005),
-          new ExposedPort(5006),
-          new ExposedPort(StoryDataRegistry.INSTANCE.getSolrPort()),
-          new ExposedPort(StoryDataRegistry.INSTANCE.getZookeeperPort()))
-        .withPortBindings(ports)
-        .exec();
-      LOG.info("Created docker container id: {}", createResponse.getId());
-
-      dockerClient.startContainerCmd(createResponse.getId()).exec();
-      StoryDataRegistry.INSTANCE.setLogsearchContainerStarted(true);
-      String dockerHostFromUri = StoryDataRegistry.INSTANCE.getDockerClientConfig().getDockerHost().getHost();
-      StoryDataRegistry.INSTANCE.setDockerHost(dockerHostFromUri);
-      checkHostAndPortReachable(dockerHostFromUri, StoryDataRegistry.INSTANCE.getLogsearchPort(), "LogSearch");
-      waitUntilSolrHasAnyData();
-
-      LOG.info("Waiting for logfeeder to finish the test log parsings... (10 sec)");
-      Thread.sleep(10000);
-    }
+    initDockerContainer();
   }
 
   @When("logfeeder started (parse logs & send data to solr)")
   public void logfeederStarted() throws Exception {
     // TODO: run ps aux to check LogFeeder process with docker exec
-    /**
-    DockerClient dockerClient = StoryDataRegistry.INSTANCE.getDockerClient();
-    ExecCreateCmdResponse execResp = dockerClient
-      .execCreateCmd(containerId)
-      .withAttachStdout(true)
-      .withCmd("ps", "aux").exec();
-    execResp.getId();
-    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-    ExecStartResultCallback res = dockerClient
-      .execStartCmd(execResp.getId())
-      .withDetach(true)
-      .withTty(true)
-      .exec(new ExecStartResultCallback(outputStream,  outputStream)).awaitCompletion();
-     **/
   }
 
   @BeforeStories
-  public void checkDockerApi() {
-    LOG.info("Tries to setup docker client configuration");
-    final String dockerHost = System.getenv("DOCKER_HOST");
-    final String dockerCertPath = System.getenv("DOCKER_CERT_PATH");
-    final String dockerApiVersion = System.getenv("DOCKER_API_VERSION") == null ? "1.20" : System.getenv("DOCKER_API_VERSION");
-
-    Preconditions.checkArgument(dockerHost != null, "Set 'DOCKER_HOST' env variable");
-    Preconditions.checkArgument(dockerCertPath != null, "Set 'DOCKER_CERT_PATH' env variable");
-    LOG.info("DOCKER_HOST: {}", dockerHost);
-    LOG.info("DOCKER_CERT_PATH: {}", dockerCertPath);
-    LOG.info("DOCKER_API_VERSION: {}", dockerApiVersion);
-    DockerClientConfig dockerClientConfig = DockerClientConfig.createDefaultConfigBuilder()
-      .withDockerHost(dockerHost)
-      .withDockerCertPath(dockerCertPath)
-      .withApiVersion(dockerApiVersion)
-      .withDockerTlsVerify(true)
-      .build();
-    StoryDataRegistry.INSTANCE.setDockerClientConfig(dockerClientConfig);
-    DockerClient dockerClient = DockerClientBuilder.getInstance(dockerClientConfig).build();
-    StoryDataRegistry.INSTANCE.setDockerClient(dockerClient);
-    LOG.info("Docker client setup successfully.");
+  public void initDocker() throws Exception {
+    // TODO: check docker is up
   }
 
   @AfterStories
   public void removeLogSearchContainer() {
-    removeLogSearchContainerIfExists();
-  }
-
-  private void removeLogSearchContainerIfExists() {
-    DockerClient dockerClient = StoryDataRegistry.INSTANCE.getDockerClient();
-    List<Container> containerList = dockerClient
-      .listContainersCmd()
-      .withShowAll(true)
-      .exec();
-
-    boolean isLogSearchContainerExists = false;
-    String containerId = null;
-    for (Container container : containerList) {
-      isLogSearchContainerExists = ArrayUtils.contains(container.getNames(), "/logsearch");
-      if (isLogSearchContainerExists) {
-        containerId = container.getId();
-        break;
-      }
-    }
-
-    if (isLogSearchContainerExists) {
-      LOG.info("Remove logsearch container: {}", containerId);
-      dockerClient.removeContainerCmd(containerId).withForce(true).exec();
-    }
-  }
-
-  private void waitUntilSolrHasAnyData() throws IOException, SolrServerException, InterruptedException {
-    boolean solrHasData = false;
-    CloudSolrClient solrClient = new CloudSolrClient(String.format("%s:%d",
-      StoryDataRegistry.INSTANCE.getDockerHost(),
-      StoryDataRegistry.INSTANCE.getZookeeperPort()));
-    StoryDataRegistry.INSTANCE.setCloudSolrClient(solrClient);
-    SolrQuery solrQuery = new SolrQuery();
-    solrQuery.setQuery("*:*");
-
-    int maxTries = 60;
-    for (int tries = 1; tries < maxTries; tries++) {
-      QueryResponse queryResponse = solrClient.query(StoryDataRegistry.INSTANCE.getServiceLogsCollection(), solrQuery);
-      SolrDocumentList list = queryResponse.getResults();
-      if (list.size() > 0) {
-        solrHasData = true;
-        break;
-      } else {
-        Thread.sleep(2000);
-        LOG.info("Solr has no data yet, retrying...");
-      }
-    }
-    if (!solrHasData) {
-      throw new IllegalStateException(String.format("Solr has no data after %d tries", maxTries));
-    }
-  }
-
-
-  private void checkHostAndPortReachable(String host, int port, String serviceName) throws InterruptedException {
-    boolean reachable = false;
-    int maxTries = 60;
-    for (int tries = 1; tries < maxTries; tries++ ) {
-      try (Socket socket = new Socket()) {
-        socket.connect(new InetSocketAddress(host, port), 1000);
-        reachable = true;
-        break;
-      } catch (IOException e) {
-        Thread.sleep(2000);
-        LOG.info("{} is not reachable yet, retrying..", serviceName);
-      }
-    }
-    if (!reachable) {
-      throw new IllegalStateException(String.format("%s is not reachable after %s tries", serviceName, maxTries));
-    }
+    runCommand(new String[]{StoryDataRegistry.INSTANCE.getShellScriptLocation(), "stop"});
   }
 }
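
With the docker-java client removed, the container lifecycle is delegated entirely to logsearch-docker.sh: start in the @Given step, stop in @AfterStories. Note that runCommand in AbstractLogSearchSteps returns only the first line of output; if the full script output were ever needed, a sketch along these lines could capture it (the ShellRunner name is hypothetical; assumes commons-io 2.3+ on the test classpath):

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;

final class ShellRunner {

  // Runs the command, merging stderr into stdout, and returns the complete output.
  static String runAndCapture(String... command) throws IOException, InterruptedException {
    Process process = new ProcessBuilder(command).redirectErrorStream(true).start();
    String output = IOUtils.toString(process.getInputStream(), StandardCharsets.UTF_8);
    process.waitFor();
    return output;
  }
}
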
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchUISteps.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchUISteps.java
new file mode 100644
index 0000000..b40a2bc
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/LogSearchUISteps.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.steps;
+
+import junit.framework.Assert;
+import org.apache.ambari.logsearch.domain.StoryDataRegistry;
+import org.apache.ambari.logsearch.web.Home;
+import org.jbehave.core.annotations.AfterScenario;
+import org.jbehave.core.annotations.AfterStories;
+import org.jbehave.core.annotations.AfterStory;
+import org.jbehave.core.annotations.BeforeScenario;
+import org.jbehave.core.annotations.BeforeStories;
+import org.jbehave.core.annotations.BeforeStory;
+import org.jbehave.core.annotations.Given;
+import org.jbehave.core.annotations.Named;
+import org.jbehave.core.annotations.Then;
+import org.jbehave.core.annotations.When;
+import org.jbehave.web.selenium.WebDriverProvider;
+import org.openqa.selenium.By;
+import org.openqa.selenium.NoSuchElementException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.TimeUnit;
+
+public class LogSearchUISteps extends AbstractLogSearchSteps {
+
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchUISteps.class);
+
+  private final WebDriverProvider driverProvider;
+
+  private Home home;
+
+  public LogSearchUISteps(WebDriverProvider driverProvider) {
+    this.driverProvider = driverProvider;
+  }
+
+  @BeforeScenario
+  public void initHomePage() {
+    home = new Home(driverProvider);
+    LOG.info("Init home page: {}", home.getCurrentUrl());
+  }
+
+  @AfterScenario
+  public void deleteCookies() {
+    LOG.info("Delete all cookies...");
+    home.manage().deleteAllCookies();
+  }
+
+  @BeforeStories
+  public void beforeStories() throws Exception {
+    initDockerContainer();
+    LOG.info("Initialize web driver...");
+    StoryDataRegistry.INSTANCE.getWebDriverProvider().initialize();
+    LOG.info("Web driver details: {}",  StoryDataRegistry.INSTANCE.getWebDriverProvider().get().toString());
+  }
+
+  @AfterStory
+  public void closePage() throws Exception {
+    LOG.info("Closing web driver");
+    StoryDataRegistry.INSTANCE.getWebDriverProvider().end();
+  }
+
+  @Given("open logsearch home page")
+  public void initBrowser() {
+    LOG.info("Delete all cookies...");
+    home.manage().deleteAllCookies();
+    LOG.info("Open home page: {}", home.getCurrentUrl());
+    home.open();
+  }
+
+  @When("login with $username / $password")
+  public void login(@Named("username") String userName, @Named("password") String password) {
+    LOG.info("Type username: {}", userName);
+    home.findElement(By.id("username")).sendKeys(userName);
+    LOG.info("Type password: {}", password);
+    home.findElement(By.id("password")).sendKeys(password);
+    LOG.info("Click on Sign In button.");
+    home.findElement(By.className("custLogin")).click();
+    closeTourPopup();
+  }
+
+  @Then("page contains text: '$text'")
+  public void contains(@Named("text") String text) {
+    LOG.info("Check page contains text: '{}'", text);
+    home.found(text);
+  }
+
+  @Then("page does not contain text: '$text'")
+  public void notContains(@Named("text") String text) {
+    LOG.info("Check page does not contain text: '{}'", text);
+    home.notFound(text);
+  }
+
+  @When("wait $seconds seconds")
+  public void waitSeconds(@Named("second") String second) {
+    LOG.info("Wait {} seconds...", second);
+    home.manage().timeouts().implicitlyWait(Integer.parseInt(second), TimeUnit.SECONDS);
+  }
+
+  @When("click on element: $xpath (xpath)")
+  public void clickOnElementByXPath(@Named("xpath") String xPath) {
+    LOG.info("Click on element by xpath: '{}'", xPath);
+    driverProvider.get().findElement(By.xpath(xPath)).click();
+  }
+
+  @When("click on element: $id (id)")
+  public void clickOnElementById(@Named("id") String id) {
+    LOG.info("Click on element by id: '{}'", id);
+    driverProvider.get().findElement(By.id(id)).click();
+  }
+
+  @When("click on element: $css (css selector)")
+  public void clickOnElementByCssSelector(@Named("css") String cssSelector) {
+    LOG.info("Click on element by css selector: '{}'", cssSelector);
+    driverProvider.get().findElement(By.cssSelector(cssSelector)).click();
+  }
+
+  @Then("element exists with xpath: $xpath")
+  public void findByXPath(@Named("xpath") String xPath) {
+    LOG.info("Find element by xpath: '{}'", xPath);
+    Assert.assertNotNull(home.findElement(By.xpath(xPath)));
+  }
+
+  @Then("element exists with xpath: $id")
+  public void findById(@Named("id") String id) {
+    LOG.info("Find element by id: '{}'", id);
+    Assert.assertNotNull(home.findElement(By.id(id)));
+  }
+
+  @Then("element exists with css selector: $css")
+  public void findByCssSelector(@Named("css") String cssSelector) {
+    LOG.info("Find element by css selector: '{}'", cssSelector);
+    Assert.assertNotNull(home.findElement(By.cssSelector(cssSelector)));
+  }
+
+  @Then("element text equals '$text', with xpath $xpath")
+  public void equalsByXPath(@Named("text") String text, @Named("xpath") String xPath) {
+    LOG.info("Check text of the element (xpath: '{}') equals with '{}'", xPath, text);
+    Assert.assertEquals(text, home.findElement(By.xpath(xPath)).getText());
+  }
+
+  @Then("element text equals '$text' with id $id")
+  public void equalsById(@Named("text") String text, @Named("id") String id) {
+    LOG.info("Check text of the element (id: '{}') equals with '{}'", id, text);
+    Assert.assertEquals(text, home.findElement(By.id(id)).getText());
+  }
+
+  @Then("element text equals '$text' with css selector $css")
+  public void equalsCssSelector(@Named("text") String text, @Named("css") String cssSelector) {
+    LOG.info("Check text of the element (css selector: '{}') equals with '{}'", cssSelector, text);
+    Assert.assertEquals(text, home.findElement(By.cssSelector(cssSelector)).getText());
+  }
+
+  @Then("element does not exist with xpath: $xpath")
+  public void doNotFindByXPath(@Named("xpath") String xPath) {
+    try {
+      LOG.info("Check that element does not exist with xpath: {}", xPath);
+      home.findElement(By.xpath(xPath));
+      Assert.fail(String.format("Element is found. xPath: '%s'", xPath));
+    } catch (NoSuchElementException e) {
+      // success
+    }
+  }
+
+  @Then("element does not exist with xpath: $id")
+  public void doNotFindById(@Named("id") String id) {
+    try {
+      LOG.info("Check that element does not exist with id: {}", id);
+      home.findElement(By.id(id));
+      Assert.fail(String.format("Element is found. id: '%s'", id));
+    } catch (NoSuchElementException e) {
+      // success
+    }
+  }
+
+  @Then("element does not exist with css selector: $css")
+  public void doNotFindByCssSelector(@Named("css") String cssSelector) {
+    try {
+      LOG.info("Check that element does not exist with css selector: {}", cssSelector);
+      home.findElement(By.cssSelector(cssSelector));
+      Assert.fail(String.format("Element is found. css selector: '%s'", cssSelector));
+    } catch (NoSuchElementException e) {
+      // success
+    }
+  }
+
+  private void closeTourPopup() {
+    LOG.info("Close Tour popup if needed.");
+    try {
+      home.findElement(By.cssSelector("div.modal-footer > button.btn.btn-default")).click();
+    } catch (NoSuchElementException ex) {
+      // do nothing - no popup
+    }
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/SolrSteps.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/SolrSteps.java
index 7c72ca7..4420540 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/SolrSteps.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/steps/SolrSteps.java
@@ -35,12 +35,12 @@
   @Then("the number of <component> docs is: <docSize>")
   public void numberOfDocsForComponent(@Named("component") String component, @Named("docSize") int docSize)
     throws IOException, SolrServerException, InterruptedException {
-    SolrClient solrClient = StoryDataRegistry.INSTANCE.getCloudSolrClient();
+    SolrClient solrClient = StoryDataRegistry.INSTANCE.getSolrClient();
     SolrQuery solrQuery = new SolrQuery();
     solrQuery.setQuery(String.format("type:%s", component));
     solrQuery.setStart(0);
     solrQuery.setRows(20);
-    QueryResponse queryResponse = solrClient.query(StoryDataRegistry.INSTANCE.getServiceLogsCollection(), solrQuery);
+    QueryResponse queryResponse = solrClient.query(solrQuery);
     SolrDocumentList list = queryResponse.getResults();
     Assert.assertEquals(docSize, list.size());
   }
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchApiQueryStory.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchApiQueryStory.java
deleted file mode 100644
index 45455bf..0000000
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchApiQueryStory.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.story;
-
-public class LogSearchApiQueryStory extends LogSearchStory {
-}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
new file mode 100644
index 0000000..fa7a527
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.story;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import org.apache.ambari.logsearch.steps.LogSearchApiSteps;
+import org.apache.ambari.logsearch.steps.SolrSteps;
+import org.apache.ambari.logsearch.steps.LogSearchDockerSteps;
+import org.jbehave.core.configuration.Configuration;
+import org.jbehave.core.configuration.MostUsefulConfiguration;
+import org.jbehave.core.junit.JUnitStories;
+import org.jbehave.core.reporters.Format;
+import org.jbehave.core.reporters.StoryReporterBuilder;
+import org.jbehave.core.steps.InjectableStepsFactory;
+import org.jbehave.core.steps.InstanceStepsFactory;
+import org.junit.Test;
+
+import java.util.List;
+
+public class LogSearchBackendStories extends JUnitStories {
+
+  private static final String BACKEND_STORIES_LOCATION_PROPERTY = "backend.stories.location";
+  private static final String STORY_SUFFIX = ".story";
+
+  @Override
+  public Configuration configuration() {
+    return new MostUsefulConfiguration()
+      .useStoryLoader(LogSearchStoryLocator.getStoryLoader(BACKEND_STORIES_LOCATION_PROPERTY, this.getClass()))
+      .useStoryReporterBuilder(
+        new StoryReporterBuilder().withFailureTrace(true).withDefaultFormats().withFormats(Format.CONSOLE, Format.TXT));
+  }
+
+  @Override
+  public InjectableStepsFactory stepsFactory() {
+    return new InstanceStepsFactory(configuration(),
+      new LogSearchDockerSteps(),
+      new SolrSteps(),
+      new LogSearchApiSteps());
+  }
+
+  @Test
+  public void run() throws Throwable {
+    super.run();
+  }
+
+  @Override
+  protected List<String> storyPaths() {
+    List<String> backendStories = LogSearchStoryLocator.findStories(BACKEND_STORIES_LOCATION_PROPERTY, STORY_SUFFIX, this.getClass());
+    return Lists.newArrayList(Collections2.filter(backendStories, new Predicate<String>() {
+      @Override
+      public boolean apply(String storyFileName) {
+        return !storyFileName.endsWith("ui.story");
+      }
+    }));
+  }
+
+}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStory.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStory.java
deleted file mode 100644
index ce6b9cb..0000000
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStory.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.story;
-
-import org.apache.ambari.logsearch.steps.LogSearchApiSteps;
-import org.apache.ambari.logsearch.steps.SolrSteps;
-import org.apache.ambari.logsearch.steps.LogSearchDockerSteps;
-import org.jbehave.core.configuration.Configuration;
-import org.jbehave.core.configuration.MostUsefulConfiguration;
-import org.jbehave.core.io.LoadFromClasspath;
-import org.jbehave.core.io.StoryPathResolver;
-import org.jbehave.core.io.UnderscoredCamelCaseResolver;
-import org.jbehave.core.junit.JUnitStory;
-import org.jbehave.core.reporters.Format;
-import org.jbehave.core.reporters.StoryReporterBuilder;
-import org.jbehave.core.steps.InjectableStepsFactory;
-import org.jbehave.core.steps.InstanceStepsFactory;
-import org.junit.Test;
-
-abstract public class LogSearchStory extends JUnitStory {
-  @Override
-  public Configuration configuration() {
-    StoryPathResolver storyPathResolver = new UnderscoredCamelCaseResolver(".story");
-    return new MostUsefulConfiguration()
-      .useStoryPathResolver(storyPathResolver)
-      .useStoryLoader(new LoadFromClasspath(this.getClass()))
-      .useStoryReporterBuilder(
-        new StoryReporterBuilder().withFailureTrace(true).withDefaultFormats().withFormats(Format.CONSOLE, Format.TXT));
-  }
-
-  @Override
-  public InjectableStepsFactory stepsFactory() {
-    return new InstanceStepsFactory(configuration(),
-      new LogSearchDockerSteps(),
-      new SolrSteps(),
-      new LogSearchApiSteps());
-  }
-
-  @Test
-  public void run() throws Throwable {
-    super.run();
-  }
-
-}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java
new file mode 100644
index 0000000..bed7999
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.story;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
+import org.jbehave.core.io.LoadFromClasspath;
+import org.jbehave.core.io.LoadFromRelativeFile;
+import org.jbehave.core.io.StoryFinder;
+import org.jbehave.core.io.StoryLoader;
+
+import java.io.File;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.jbehave.core.io.CodeLocations.codeLocationFromClass;
+
+/**
+ * Helper class for loading story files from the classpath or externally - based on system properties
+ */
+public class LogSearchStoryLocator {
+
+  private LogSearchStoryLocator() {
+  }
+
+  /**
+   * Get the proper story loader based on the story location property (if it is empty or NONE, stories are loaded from the classpath)
+   * @param property Story location property (absolute path - folder)
+   * @param clazz Class of the *Stories object
+   */
+  public static StoryLoader getStoryLoader(String property, Class<?> clazz) {
+    boolean useExternalStoryLocation = useExternalStoryLocation(property);
+    if (useExternalStoryLocation) {
+      try {
+        return new LoadFromRelativeFile(new URL("file://" + System.getProperty(property)));
+      } catch (Exception e) {
+        throw new RuntimeException("Cannot load story files from url: file://" + System.getProperty(property));
+      }
+    } else {
+      return new LoadFromClasspath(clazz);
+    }
+  }
+
+
+  /**
+   * Find stories based on the story location property; if the property is not set or NONE, the story files are loaded from the classpath
+   * @param property Story location property (absolute path - folder)
+   * @param suffix Story suffix for specific stories - e.g. .ui.story
+   * @param clazz Class of the *Stories object
+   */
+  public static List<String> findStories(String property, String suffix, Class<?> clazz) {
+    List<String> stories = null;
+    if (useExternalStoryLocation(property)) {
+      stories = findStoriesInFolder(System.getProperty(property), suffix);
+    } else {
+      stories = new StoryFinder()
+        .findPaths(codeLocationFromClass(clazz).getFile(), Arrays.asList(String.format("**/*%s", suffix)), null);
+    }
+    return stories;
+  }
+
+  private static List<String> findStoriesInFolder(String folderAbsolutePath, String suffix) {
+    List<String> results = Lists.newArrayList();
+    File folder = new File(folderAbsolutePath);
+    File[] listOfFiles = folder.listFiles();
+    if (listOfFiles != null) {
+      for (File file : listOfFiles) {
+        if (file.getName().endsWith(suffix)) {
+          results.add(file.getName());
+        }
+      }
+    }
+    return results;
+  }
+
+  private static boolean useExternalStoryLocation(String property) {
+    String storyLocationProp = System.getProperty(property);
+    return StringUtils.isNotEmpty(storyLocationProp) && !"NONE".equals(storyLocationProp);
+  }
+}
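
A usage sketch for the locator (the LocatorExample class and the /tmp/stories folder are hypothetical): when the location property is set, stories are resolved from that folder on disk; when it is unset or NONE, they are loaded from the classpath next to the calling class.

import java.util.List;

import org.apache.ambari.logsearch.story.LogSearchStoryLocator;
import org.jbehave.core.io.StoryLoader;

public class LocatorExample {
  public static void main(String[] args) {
    System.setProperty("backend.stories.location", "/tmp/stories");
    StoryLoader loader = LogSearchStoryLocator.getStoryLoader("backend.stories.location", LocatorExample.class);
    List<String> stories = LogSearchStoryLocator.findStories("backend.stories.location", ".story", LocatorExample.class);
    // With the property set, loader is a LoadFromRelativeFile and stories come from /tmp/stories.
    System.out.println(loader.getClass().getSimpleName() + " -> " + stories);
  }
}
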
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
new file mode 100644
index 0000000..5417ab1
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.story;
+
+import org.apache.ambari.logsearch.domain.StoryDataRegistry;
+import org.apache.ambari.logsearch.steps.LogSearchDockerSteps;
+import org.apache.ambari.logsearch.steps.LogSearchUISteps;
+import org.jbehave.core.configuration.Configuration;
+import org.jbehave.core.Embeddable;
+import org.jbehave.core.embedder.executors.SameThreadExecutors;
+import org.jbehave.core.junit.JUnitStories;
+import org.jbehave.core.reporters.StoryReporterBuilder;
+import org.jbehave.core.steps.InjectableStepsFactory;
+import org.jbehave.core.steps.InstanceStepsFactory;
+import org.jbehave.web.selenium.RemoteWebDriverProvider;
+import org.jbehave.web.selenium.SeleniumConfiguration;
+import org.jbehave.web.selenium.SeleniumContext;
+import org.jbehave.web.selenium.WebDriverProvider;
+import org.jbehave.web.selenium.WebDriverScreenshotOnFailure;
+import org.openqa.selenium.Platform;
+import org.openqa.selenium.remote.DesiredCapabilities;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.jbehave.core.io.CodeLocations.codeLocationFromClass;
+import static org.jbehave.core.reporters.Format.CONSOLE;
+import static org.jbehave.core.reporters.Format.HTML;
+import static org.jbehave.core.reporters.Format.TXT;
+import static org.jbehave.core.reporters.Format.XML;
+
+public class LogSearchUIStories extends JUnitStories {
+
+  private WebDriverProvider driverProvider;
+  private SeleniumContext context;
+
+  private static final String UI_STORIES_LOCATION_PROPERTY = "ui.stories.location";
+  private static final String STORY_SUFFIX = ".ui.story";
+
+  public LogSearchUIStories() {
+    String dockerHost = System.getProperty("docker.host") != null ? System.getProperty("docker.host") : "localhost";
+    System.setProperty("REMOTE_WEBDRIVER_URL", String.format("http://%s:4444/wd/hub", dockerHost));
+    DesiredCapabilities capability = DesiredCapabilities.firefox();
+    capability.setPlatform(Platform.LINUX);
+    capability.setVersion("45.8.0");
+    driverProvider = new RemoteWebDriverProvider(capability);
+    StoryDataRegistry.INSTANCE.setWebDriverProvider(driverProvider);
+    context = new SeleniumContext();
+    configuredEmbedder().useExecutorService(new SameThreadExecutors().create(configuredEmbedder().embedderControls()));
+  }
+
+  @Override
+  public Configuration configuration() {
+    Class<? extends Embeddable> embeddableClass = this.getClass();
+    return new SeleniumConfiguration()
+      .useSeleniumContext(context)
+      .useWebDriverProvider(driverProvider)
+      .useStoryLoader(LogSearchStoryLocator.getStoryLoader(UI_STORIES_LOCATION_PROPERTY, this.getClass()))
+      .useStoryReporterBuilder(new StoryReporterBuilder()
+        .withCodeLocation(codeLocationFromClass(embeddableClass))
+        .withDefaultFormats()
+        .withFormats(CONSOLE, TXT, HTML, XML));
+  }
+
+  @Override
+  public InjectableStepsFactory stepsFactory() {
+    Configuration configuration = configuration();
+    return new InstanceStepsFactory(configuration, new LogSearchDockerSteps(), new LogSearchUISteps(driverProvider),
+      new WebDriverScreenshotOnFailure(driverProvider, configuration.storyReporterBuilder()));
+  }
+
+  @Override
+  protected List<String> storyPaths() {
+    return LogSearchStoryLocator.findStories(UI_STORIES_LOCATION_PROPERTY, STORY_SUFFIX, this.getClass());
+  }
+}
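
The constructor above derives the Selenium hub address from docker.host and publishes it via the REMOTE_WEBDRIVER_URL system property, which RemoteWebDriverProvider picks up. To run the same stories against a different hub, a sketch such as this could be used (the CustomHubExample helper is hypothetical):

import org.jbehave.web.selenium.RemoteWebDriverProvider;
import org.jbehave.web.selenium.WebDriverProvider;
import org.openqa.selenium.Platform;
import org.openqa.selenium.remote.DesiredCapabilities;

public class CustomHubExample {

  // Points the provider at an explicit Selenium hub URL instead of deriving it from docker.host.
  public static WebDriverProvider firefoxOnHub(String hubUrl) {
    System.setProperty("REMOTE_WEBDRIVER_URL", hubUrl);
    DesiredCapabilities capability = DesiredCapabilities.firefox();
    capability.setPlatform(Platform.LINUX);
    return new RemoteWebDriverProvider(capability);
  }
}
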
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogfeederParsingStory.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogfeederParsingStory.java
deleted file mode 100644
index c502cc4..0000000
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogfeederParsingStory.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.story;
-
-public class LogfeederParsingStory extends LogSearchStory {
-}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/AbstractPage.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/AbstractPage.java
new file mode 100644
index 0000000..b6d0a58
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/AbstractPage.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.web;
+
+import org.jbehave.web.selenium.WebDriverPage;
+import org.jbehave.web.selenium.WebDriverProvider;
+
+import java.util.List;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.fail;
+
+public abstract class AbstractPage extends WebDriverPage {
+
+  public AbstractPage(WebDriverProvider driverProvider) {
+    super(driverProvider);
+  }
+
+  public void found(String text) {
+    found(getPageSource(), text);
+  }
+
+  public void found(String pageSource, String text) {
+    if (!pageSource.contains(escapeHtml(text))) {
+      fail("Text: '" + text + "' not found in page '" + pageSource + "'");
+    }
+  }
+
+  public void found(List<String> texts) {
+    for (String text : texts) {
+      found(text);
+    }
+  }
+
+  public void notFound(String text) {
+    notFound(getPageSource(), text);
+  }
+
+  public void notFound(String pageSource, String text) {
+    assertThat(pageSource.contains(escapeHtml(text)), is(false));
+  }
+
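+  // Escape '<' and '>' in the expected text to match their encoded form in the page source.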
+  private String escapeHtml(String text) {
+    return text.replace("<", "&lt;").replace(">", "&gt;");
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/Home.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/Home.java
new file mode 100644
index 0000000..6c576d4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/web/Home.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.web;
+
+import org.apache.ambari.logsearch.domain.StoryDataRegistry;
+import org.jbehave.web.selenium.WebDriverProvider;
+
+import java.util.concurrent.TimeUnit;
+
+public class Home extends AbstractPage {
+
+  public Home(WebDriverProvider driverProvider) {
+    super(driverProvider);
+  }
+
+  public void open() {
+    get(String.format("http://%s:%d/index.html",
+      StoryDataRegistry.INSTANCE.getDockerHost(),
+      StoryDataRegistry.INSTANCE.getLogsearchPort()));
+    manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS);
+  }
+
+}
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/resources/org/apache/ambari/logsearch/story/log_search_api_query_story.story b/ambari-logsearch/ambari-logsearch-it/src/test/resources/org/apache/ambari/logsearch/story/log_search_api_query_story.story
deleted file mode 100644
index 5abe8b4..0000000
--- a/ambari-logsearch/ambari-logsearch-it/src/test/resources/org/apache/ambari/logsearch/story/log_search_api_query_story.story
+++ /dev/null
@@ -1,17 +0,0 @@
-Meta:
-
-Narrative:
-As a user
-I want to perform queries against Log Search api
-So that I can validate the json outputs
-
-Scenario: scenario description
-
-Given logsearch docker container
-When LogSearch api query sent: <apiQuery>
-Then The api query result is <jsonResult>
-
-Examples:
-|apiQuery|jsonResult|
-|/api/v1/service/logs/schema/fields|service-log-schema.json|
-|/api/v1/service/logs/levels/counts/namevalues?page=0&pageSize=25&startIndex=0&q=*%3A*|service-log-level-counts-values.json|
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/backend/log_search_api_tests.story b/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/backend/log_search_api_tests.story
new file mode 100644
index 0000000..0af00f5
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/backend/log_search_api_tests.story
@@ -0,0 +1,17 @@
+Meta:
+
+Narrative:
+As a user
+I want to perform queries against Log Search api
+So that I can validate the json outputs
+
+Scenario: Log Search API JSON responses
+
+Given logsearch docker container
+When LogSearch api query sent: <apiQuery>
+Then The api query result is <jsonResult>
+
+Examples:
+|apiQuery|jsonResult|
+|/api/v1/service/logs/schema/fields|service-log-schema.json|
+|/api/v1/service/logs/levels/counts?page=0&pageSize=25&startIndex=0&q=*%3A*|service-log-level-counts-values.json|
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/resources/org/apache/ambari/logsearch/story/logfeeder_parsing_story.story b/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/backend/logfeeder_parsing_tests.story
similarity index 100%
rename from ambari-logsearch/ambari-logsearch-it/src/test/resources/org/apache/ambari/logsearch/story/logfeeder_parsing_story.story
rename to ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/backend/logfeeder_parsing_tests.story
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/selenium/login.ui.story b/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/selenium/login.ui.story
new file mode 100644
index 0000000..543c211
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/resources/stories/selenium/login.ui.story
@@ -0,0 +1,20 @@
+Meta:
+
+Narrative:
+As a user
+I want to start LogSearch services and login to the UI
+So that I can validate the proper user
+
+Scenario: login with admin/admin
+
+Given logsearch docker container
+And open logsearch home page
+When login with admin / admin
+Then page contains text: 'Service Logs'
+
+Scenario: login with admin and wrong password
+
+Given logsearch docker container
+And open logsearch home page
+When login with admin / wrongpassword
+Then page does not contain text: 'Service Logs'
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
index 25e4306..5d6f8b6 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
@@ -44,6 +44,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-config-zookeeper</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>commons-codec</groupId>
       <artifactId>commons-codec</artifactId>
     </dependency>
@@ -88,7 +93,6 @@
       <artifactId>commons-logging</artifactId>
       <version>1.1.1</version>
     </dependency>
-
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
@@ -125,9 +129,9 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-    <groupId>com.amazonaws</groupId>
-    <artifactId>aws-java-sdk-s3</artifactId>
-    <version>1.11.5</version>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk-s3</artifactId>
+      <version>1.11.5</version>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
@@ -135,26 +139,40 @@
       <version>1.11</version>
     </dependency>
     <dependency>
-    <groupId>com.amazonaws</groupId>
-    <artifactId>aws-java-sdk-iam</artifactId>
-    <version>1.11.5</version>
-  </dependency>
-   <dependency>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-common</artifactId>
-    <version>${hadoop.version}</version>
-  </dependency>
-  <dependency>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdfs</artifactId>
-    <version>${hadoop.version}</version>
-  </dependency>
-  <dependency>
-    <groupId>commons-io</groupId>
-    <artifactId>commons-io</artifactId>
-    <version>${common.io.version}</version>
-  </dependency>
- </dependencies>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk-iam</artifactId>
+      <version>1.11.5</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-framework</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-recipes</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <version>${common.io.version}</version>
+    </dependency>
+  </dependencies>
   <build>
     <finalName>LogFeeder</finalName>
     <pluginManagement>
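Note on the exclusions added to hadoop-common above: the new ambari-logsearch-config-zookeeper dependency brings its own Apache Curator artifacts, so the curator-framework, curator-client and curator-recipes artifacts that hadoop-common would otherwise pull in transitively are excluded, presumably to keep a single Curator version on the LogFeeder classpath. The effective resolution can be verified with mvn dependency:tree -Dincludes=org.apache.curator.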
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
index a47c71f..8d7c69f 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
@@ -19,60 +19,37 @@
 
 package org.apache.ambari.logfeeder;
 
-import java.io.BufferedInputStream;
-import java.io.File;
-import java.lang.reflect.Type;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
 
-import org.apache.ambari.logfeeder.filter.Filter;
-import org.apache.ambari.logfeeder.input.Input;
+import org.apache.ambari.logfeeder.common.ConfigHandler;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.LogSearchConfigFactory;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
+import org.apache.ambari.logsearch.config.zookeeper.LogSearchConfigZK;
+import org.apache.ambari.logfeeder.input.InputConfigUploader;
 import org.apache.ambari.logfeeder.input.InputManager;
-import org.apache.ambari.logfeeder.input.InputSimulate;
-import org.apache.ambari.logfeeder.logconfig.LogConfigHandler;
+import org.apache.ambari.logfeeder.loglevelfilter.LogLevelFilterHandler;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.metrics.MetricsManager;
-import org.apache.ambari.logfeeder.output.Output;
-import org.apache.ambari.logfeeder.output.OutputManager;
-import org.apache.ambari.logfeeder.util.AliasUtil;
-import org.apache.ambari.logfeeder.util.FileUtil;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.SSLUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.ambari.logfeeder.util.AliasUtil.AliasType;
+import com.google.common.collect.Maps;
 import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 
-import com.google.gson.reflect.TypeToken;
-
 public class LogFeeder {
   private static final Logger LOG = Logger.getLogger(LogFeeder.class);
 
   private static final int LOGFEEDER_SHUTDOWN_HOOK_PRIORITY = 30;
   private static final int CHECKPOINT_CLEAN_INTERVAL_MS = 24 * 60 * 60 * 1000; // 24 hours
 
-  private OutputManager outputManager = new OutputManager();
+  private ConfigHandler configHandler = new ConfigHandler();
+  private LogSearchConfig config;
+  
   private InputManager inputManager = new InputManager();
   private MetricsManager metricsManager = new MetricsManager();
 
-  public static Map<String, Object> globalConfigs = new HashMap<>();
-
-  private List<Map<String, Object>> inputConfigList = new ArrayList<>();
-  private List<Map<String, Object>> filterConfigList = new ArrayList<>();
-  private List<Map<String, Object>> outputConfigList = new ArrayList<>();
-  
   private long lastCheckPointCleanedMS = 0;
   private boolean isLogfeederCompleted = false;
   private Thread statLoggerThread = null;
@@ -91,329 +68,23 @@
   }
 
   private void init() throws Throwable {
-    Date startTime = new Date();
+    long startTime = System.currentTimeMillis();
 
-    loadConfigFiles();
-    addSimulatedInputs();
-    mergeAllConfigs();
-    
+    configHandler.init();
     SSLUtil.ensureStorePasswords();
     
-    outputManager.init();
-    inputManager.init();
-    metricsManager.init();
+    config = LogSearchConfigFactory.createLogSearchConfig(Component.LOGFEEDER,
+        Maps.fromProperties(LogFeederUtil.getProperties()), LogSearchConfigZK.class);
+    LogLevelFilterHandler.init(config);
+    InputConfigUploader.load(config);
+    config.monitorInputConfigChanges(configHandler, new LogLevelFilterHandler());
     
-    LogConfigHandler.handleConfig();
+    metricsManager.init();
     
     LOG.debug("==============");
     
-    Date endTime = new Date();
-    LOG.info("Took " + (endTime.getTime() - startTime.getTime()) + " ms to initialize");
-  }
-
-  private void loadConfigFiles() throws Exception {
-    List<String> configFiles = getConfigFiles();
-    for (String configFileName : configFiles) {
-      LOG.info("Going to load config file:" + configFileName);
-      configFileName = configFileName.replace("\\ ", "%20");
-      File configFile = new File(configFileName);
-      if (configFile.exists() && configFile.isFile()) {
-        LOG.info("Config file exists in path." + configFile.getAbsolutePath());
-        loadConfigsUsingFile(configFile);
-      } else {
-        LOG.info("Trying to load config file from classloader: " + configFileName);
-        loadConfigsUsingClassLoader(configFileName);
-        LOG.info("Loaded config file from classloader: " + configFileName);
-      }
-    }
-  }
-
-  private List<String> getConfigFiles() {
-    List<String> configFiles = new ArrayList<>();
-    
-    String logfeederConfigFilesProperty = LogFeederUtil.getStringProperty("logfeeder.config.files");
-    LOG.info("logfeeder.config.files=" + logfeederConfigFilesProperty);
-    if (logfeederConfigFilesProperty != null) {
-      configFiles.addAll(Arrays.asList(logfeederConfigFilesProperty.split(",")));
-    }
-
-    String inputConfigDir = LogFeederUtil.getStringProperty("input_config_dir");
-    if (StringUtils.isNotEmpty(inputConfigDir)) {
-      File configDirFile = new File(inputConfigDir);
-      List<File> inputConfigFiles = FileUtil.getAllFileFromDir(configDirFile, "json", false);
-      for (File inputConfigFile : inputConfigFiles) {
-        configFiles.add(inputConfigFile.getAbsolutePath());
-      }
-    }
-    
-    if (CollectionUtils.isEmpty(configFiles)) {
-      String configFileProperty = LogFeederUtil.getStringProperty("config.file", "config.json");
-      configFiles.addAll(Arrays.asList(configFileProperty.split(",")));
-    }
-    
-    return configFiles;
-  }
-
-  private void loadConfigsUsingFile(File configFile) throws Exception {
-    try {
-      String configData = FileUtils.readFileToString(configFile);
-      loadConfigs(configData);
-    } catch (Exception t) {
-      LOG.error("Error opening config file. configFilePath=" + configFile.getAbsolutePath());
-      throw t;
-    }
-  }
-
-  private void loadConfigsUsingClassLoader(String configFileName) throws Exception {
-    try (BufferedInputStream fis = (BufferedInputStream) this.getClass().getClassLoader().getResourceAsStream(configFileName)) {
-      String configData = IOUtils.toString(fis);
-      loadConfigs(configData);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private void loadConfigs(String configData) throws Exception {
-    Type type = new TypeToken<Map<String, Object>>() {}.getType();
-    Map<String, Object> configMap = LogFeederUtil.getGson().fromJson(configData, type);
-
-    // Get the globals
-    for (String key : configMap.keySet()) {
-      switch (key) {
-        case "global" :
-          globalConfigs.putAll((Map<String, Object>) configMap.get(key));
-          break;
-        case "input" :
-          List<Map<String, Object>> inputConfig = (List<Map<String, Object>>) configMap.get(key);
-          inputConfigList.addAll(inputConfig);
-          break;
-        case "filter" :
-          List<Map<String, Object>> filterConfig = (List<Map<String, Object>>) configMap.get(key);
-          filterConfigList.addAll(filterConfig);
-          break;
-        case "output" :
-          List<Map<String, Object>> outputConfig = (List<Map<String, Object>>) configMap.get(key);
-          outputConfigList.addAll(outputConfig);
-          break;
-        default :
-          LOG.warn("Unknown config key: " + key);
-      }
-    }
-  }
-  
-  private void addSimulatedInputs() {
-    int simulatedInputNumber = LogFeederUtil.getIntProperty("logfeeder.simulate.input_number", 0);
-    if (simulatedInputNumber == 0)
-      return;
-    
-    InputSimulate.loadTypeToFilePath(inputConfigList);
-    inputConfigList.clear();
-    
-    for (int i = 0; i < simulatedInputNumber; i++) {
-      HashMap<String, Object> mapList = new HashMap<String, Object>();
-      mapList.put("source", "simulate");
-      mapList.put("rowtype", "service");
-      inputConfigList.add(mapList);
-    }
-  }
-
-  private void mergeAllConfigs() {
-    loadOutputs();
-    loadInputs();
-    loadFilters();
-    
-    assignOutputsToInputs();
-  }
-
-  private void loadOutputs() {
-    for (Map<String, Object> map : outputConfigList) {
-      if (map == null) {
-        continue;
-      }
-      mergeBlocks(globalConfigs, map);
-
-      String value = (String) map.get("destination");
-      if (StringUtils.isEmpty(value)) {
-        LOG.error("Output block doesn't have destination element");
-        continue;
-      }
-      Output output = (Output) AliasUtil.getClassInstance(value, AliasType.OUTPUT);
-      if (output == null) {
-        LOG.error("Output object could not be found");
-        continue;
-      }
-      output.setDestination(value);
-      output.loadConfig(map);
-
-      // We will only check for is_enabled out here. Down below we will check whether this output is enabled for the input
-      if (output.getBooleanValue("is_enabled", true)) {
-        output.logConfigs(Level.INFO);
-        outputManager.add(output);
-      } else {
-        LOG.info("Output is disabled. So ignoring it. " + output.getShortDescription());
-      }
-    }
-  }
-
-  private void loadInputs() {
-    for (Map<String, Object> map : inputConfigList) {
-      if (map == null) {
-        continue;
-      }
-      mergeBlocks(globalConfigs, map);
-
-      String value = (String) map.get("source");
-      if (StringUtils.isEmpty(value)) {
-        LOG.error("Input block doesn't have source element");
-        continue;
-      }
-      Input input = (Input) AliasUtil.getClassInstance(value, AliasType.INPUT);
-      if (input == null) {
-        LOG.error("Input object could not be found");
-        continue;
-      }
-      input.setType(value);
-      input.loadConfig(map);
-
-      if (input.isEnabled()) {
-        input.setOutputManager(outputManager);
-        input.setInputManager(inputManager);
-        inputManager.add(input);
-        input.logConfigs(Level.INFO);
-      } else {
-        LOG.info("Input is disabled. So ignoring it. " + input.getShortDescription());
-      }
-    }
-  }
-
-  private void loadFilters() {
-    sortFilters();
-
-    List<Input> toRemoveInputList = new ArrayList<Input>();
-    for (Input input : inputManager.getInputList()) {
-      for (Map<String, Object> map : filterConfigList) {
-        if (map == null) {
-          continue;
-        }
-        mergeBlocks(globalConfigs, map);
-
-        String value = (String) map.get("filter");
-        if (StringUtils.isEmpty(value)) {
-          LOG.error("Filter block doesn't have filter element");
-          continue;
-        }
-        Filter filter = (Filter) AliasUtil.getClassInstance(value, AliasType.FILTER);
-        if (filter == null) {
-          LOG.error("Filter object could not be found");
-          continue;
-        }
-        filter.loadConfig(map);
-        filter.setInput(input);
-
-        if (filter.isEnabled()) {
-          filter.setOutputManager(outputManager);
-          input.addFilter(filter);
-          filter.logConfigs(Level.INFO);
-        } else {
-          LOG.debug("Ignoring filter " + filter.getShortDescription() + " for input " + input.getShortDescription());
-        }
-      }
-      
-      if (input.getFirstFilter() == null) {
-        toRemoveInputList.add(input);
-      }
-    }
-
-    for (Input toRemoveInput : toRemoveInputList) {
-      LOG.warn("There are no filters, we will ignore this input. " + toRemoveInput.getShortDescription());
-      inputManager.removeInput(toRemoveInput);
-    }
-  }
-
-  private void sortFilters() {
-    Collections.sort(filterConfigList, new Comparator<Map<String, Object>>() {
-
-      @Override
-      public int compare(Map<String, Object> o1, Map<String, Object> o2) {
-        Object o1Sort = o1.get("sort_order");
-        Object o2Sort = o2.get("sort_order");
-        if (o1Sort == null || o2Sort == null) {
-          return 0;
-        }
-        
-        int o1Value = parseSort(o1, o1Sort);
-        int o2Value = parseSort(o2, o2Sort);
-        
-        return o1Value - o2Value;
-      }
-
-      private int parseSort(Map<String, Object> map, Object o) {
-        if (!(o instanceof Number)) {
-          try {
-            return (new Double(Double.parseDouble(o.toString()))).intValue();
-          } catch (Throwable t) {
-            LOG.error("Value is not of type Number. class=" + o.getClass().getName() + ", value=" + o.toString()
-              + ", map=" + map.toString());
-            return 0;
-          }
-        } else {
-          return ((Number) o).intValue();
-        }
-      }
-    });
-  }
-
-  private void assignOutputsToInputs() {
-    Set<Output> usedOutputSet = new HashSet<Output>();
-    for (Input input : inputManager.getInputList()) {
-      for (Output output : outputManager.getOutputs()) {
-        if (LogFeederUtil.isEnabled(output.getConfigs(), input.getConfigs())) {
-          usedOutputSet.add(output);
-          input.addOutput(output);
-        }
-      }
-    }
-    
-    // In case of simulation copies of the output are added for each simulation instance, these must be added to the manager
-    for (Output output : InputSimulate.getSimulateOutputs()) {
-      outputManager.add(output);
-      usedOutputSet.add(output);
-    }
-    
-    outputManager.retainUsedOutputs(usedOutputSet);
-  }
-
-  @SuppressWarnings("unchecked")
-  private void mergeBlocks(Map<String, Object> fromMap, Map<String, Object> toMap) {
-    for (String key : fromMap.keySet()) {
-      Object objValue = fromMap.get(key);
-      if (objValue == null) {
-        continue;
-      }
-      if (objValue instanceof Map) {
-        Map<String, Object> globalFields = LogFeederUtil.cloneObject((Map<String, Object>) objValue);
-
-        Map<String, Object> localFields = (Map<String, Object>) toMap.get(key);
-        if (localFields == null) {
-          localFields = new HashMap<String, Object>();
-          toMap.put(key, localFields);
-        }
-
-        if (globalFields != null) {
-          for (String fieldKey : globalFields.keySet()) {
-            if (!localFields.containsKey(fieldKey)) {
-              localFields.put(fieldKey, globalFields.get(fieldKey));
-            }
-          }
-        }
-      }
-    }
-
-    // Let's add the rest of the top level fields if missing
-    for (String key : fromMap.keySet()) {
-      if (!toMap.containsKey(key)) {
-        toMap.put(key, fromMap.get(key));
-      }
-    }
+    long endTime = System.currentTimeMillis();
+    LOG.info("Took " + (endTime - startTime) + " ms to initialize");
   }
 
   private class JVMShutdownHook extends Thread {
@@ -422,10 +93,8 @@
       try {
         LOG.info("Processing is shutting down.");
 
-        inputManager.close();
-        outputManager.close();
-        inputManager.checkInAll();
-
+        configHandler.close();
+        config.close();
         logStats();
 
         LOG.info("LogSearch is exiting.");
@@ -436,7 +105,6 @@
   }
 
   private void monitor() throws Exception {
-    inputManager.monitor();
     JVMShutdownHook logfeederJVMHook = new JVMShutdownHook();
     ShutdownHookManager.get().addShutdownHook(logfeederJVMHook, LOGFEEDER_SHUTDOWN_HOOK_PRIORITY);
     
@@ -458,7 +126,7 @@
 
           if (System.currentTimeMillis() > (lastCheckPointCleanedMS + CHECKPOINT_CLEAN_INTERVAL_MS)) {
             lastCheckPointCleanedMS = System.currentTimeMillis();
-            inputManager.cleanCheckPointFiles();
+            configHandler.cleanCheckPointFiles();
           }
 
           if (isLogfeederCompleted) {
@@ -474,13 +142,11 @@
   }
 
   private void logStats() {
-    inputManager.logStats();
-    outputManager.logStats();
+    configHandler.logStats();
 
     if (metricsManager.isMetricsEnabled()) {
       List<MetricData> metricsList = new ArrayList<MetricData>();
-      inputManager.addMetricsContainers(metricsList);
-      outputManager.addMetricsContainers(metricsList);
+      configHandler.addMetrics(metricsList);
       metricsManager.useMetrics(metricsList);
     }
   }
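The refactored init() above registers configuration monitoring instead of loading inputs directly, but the shutdown path still goes through Hadoop's ShutdownHookManager with LOGFEEDER_SHUTDOWN_HOOK_PRIORITY = 30. Hooks run in descending priority order, so this hook fires before Hadoop's own FileSystem shutdown hook (priority 10), letting HDFS-backed outputs flush while the file system is still open. A minimal sketch of the registration pattern, with only the ShutdownHookManager call taken from this file and the rest illustrative:

import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookSketch {
  private static final int LOGFEEDER_SHUTDOWN_HOOK_PRIORITY = 30;

  public static void main(String[] args) {
    // Higher priority runs earlier; 30 > 10 (FileSystem.SHUTDOWN_HOOK_PRIORITY),
    // so this hook can still write to HDFS during shutdown.
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      public void run() {
        System.out.println("closing config handler and config connection");
      }
    }, LOGFEEDER_SHUTDOWN_HOOK_PRIORITY);
  }
}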
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
index 68897e8..cfcc199 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigBlock.java
@@ -20,54 +20,19 @@
 package org.apache.ambari.logfeeder.common;
 
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
 import org.apache.log4j.Priority;
 
 
-public abstract class ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(ConfigBlock.class);
-
-  private boolean drain = false;
-
+public abstract class ConfigBlock extends ConfigItem {
   protected Map<String, Object> configs;
   protected Map<String, String> contextFields = new HashMap<String, String>();
-  public MetricData statMetric = new MetricData(getStatMetricName(), false);
-  protected String getStatMetricName() {
-    return null;
-  }
-  
   public ConfigBlock() {
   }
 
-  /**
-   * Used while logging. Keep it short and meaningful
-   */
-  public abstract String getShortDescription();
-
-  /**
-   * Every implementor need to give name to the thread they create
-   */
-  public String getNameForThread() {
-    return this.getClass().getSimpleName();
-  }
-
-  public void addMetricsContainers(List<MetricData> metricsList) {
-    metricsList.add(statMetric);
-  }
-
-  /**
-   * This method needs to be overwritten by deriving classes.
-   */
-  public void init() throws Exception {
-  }
-
   public void loadConfig(Map<String, Object> map) {
     configs = LogFeederUtil.cloneObject(map);
 
@@ -81,46 +46,6 @@
     return configs;
   }
 
-  @SuppressWarnings("unchecked")
-  public boolean isEnabled() {
-    boolean isEnabled = getBooleanValue("is_enabled", true);
-    if (isEnabled) {
-      // Let's check for static conditions
-      Map<String, Object> conditions = (Map<String, Object>) configs.get("conditions");
-      boolean allow = true;
-      if (MapUtils.isNotEmpty(conditions)) {
-        allow = false;
-        for (String conditionType : conditions.keySet()) {
-          if (conditionType.equalsIgnoreCase("fields")) {
-            Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
-            for (String fieldName : fields.keySet()) {
-              Object values = fields.get(fieldName);
-              if (values instanceof String) {
-                allow = isFieldConditionMatch(fieldName, (String) values);
-              } else {
-                List<String> listValues = (List<String>) values;
-                for (String stringValue : listValues) {
-                  allow = isFieldConditionMatch(fieldName, stringValue);
-                  if (allow) {
-                    break;
-                  }
-                }
-              }
-              if (allow) {
-                break;
-              }
-            }
-          }
-          if (allow) {
-            break;
-          }
-        }
-        isEnabled = allow;
-      }
-    }
-    return isEnabled;
-  }
-
   public boolean isFieldConditionMatch(String fieldName, String stringValue) {
     boolean allow = false;
     String fieldValue = (String) configs.get(fieldName);
@@ -207,27 +132,17 @@
     return retValue;
   }
 
+  @Override
+  public boolean isEnabled() {
+    return getBooleanValue("is_enabled", true);
+  }
+
   public Map<String, String> getContextFields() {
     return contextFields;
   }
 
-  public void incrementStat(int count) {
-    statMetric.value += count;
-  }
-
-  public void logStatForMetric(MetricData metric, String prefixStr) {
-    LogFeederUtil.logStatForMetric(metric, prefixStr, ", key=" + getShortDescription());
-  }
-
-  public synchronized void logStat() {
-    logStatForMetric(statMetric, "Stat");
-  }
-
   public boolean logConfigs(Priority level) {
-    if (level.toInt() == Priority.INFO_INT && !LOG.isInfoEnabled()) {
-      return false;
-    }
-    if (level.toInt() == Priority.DEBUG_INT && !LOG.isDebugEnabled()) {
+    if (!super.logConfigs(level)) {
       return false;
     }
     LOG.log(level, "Printing configuration Block=" + getShortDescription());
@@ -235,12 +150,4 @@
     LOG.log(level, "contextFields=" + contextFields);
     return true;
   }
-
-  public boolean isDrain() {
-    return drain;
-  }
-
-  public void setDrain(boolean drain) {
-    this.drain = drain;
-  }
 }
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
new file mode 100644
index 0000000..726ff27
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigHandler.java
@@ -0,0 +1,420 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.common;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.lang.reflect.Type;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.logfeeder.filter.Filter;
+import org.apache.ambari.logfeeder.input.Input;
+import org.apache.ambari.logfeeder.input.InputManager;
+import org.apache.ambari.logfeeder.input.InputSimulate;
+import org.apache.ambari.logfeeder.metrics.MetricData;
+import org.apache.ambari.logfeeder.output.Output;
+import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logfeeder.util.AliasUtil;
+import org.apache.ambari.logfeeder.util.FileUtil;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.ambari.logfeeder.util.AliasUtil.AliasType;
+import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+import com.google.gson.reflect.TypeToken;
+
+public class ConfigHandler implements InputConfigMonitor {
+  private static final Logger LOG = Logger.getLogger(ConfigHandler.class);
+
+  private final OutputManager outputManager = new OutputManager();
+  private final InputManager inputManager = new InputManager();
+
+  private final Map<String, Object> globalConfigs = new HashMap<>();
+  private final List<String> globalConfigJsons = new ArrayList<String>();
+
+  private final List<InputDescriptor> inputConfigList = new ArrayList<>();
+  private final List<FilterDescriptor> filterConfigList = new ArrayList<>();
+  private final List<Map<String, Object>> outputConfigList = new ArrayList<>();
+  
+  private boolean simulateMode = false;
+  
+  public ConfigHandler() {}
+  
+  public void init() throws Exception {
+    loadConfigFiles();
+    loadOutputs();
+    simulateIfNeeded();
+    
+    inputManager.init();
+    outputManager.init();
+  }
+  
+  private void loadConfigFiles() throws Exception {
+    List<String> configFiles = getConfigFiles();
+    for (String configFileName : configFiles) {
+      LOG.info("Going to load config file:" + configFileName);
+      configFileName = configFileName.replace("\\ ", "%20");
+      File configFile = new File(configFileName);
+      if (configFile.exists() && configFile.isFile()) {
+        LOG.info("Config file exists in path." + configFile.getAbsolutePath());
+        loadConfigsUsingFile(configFile);
+      } else {
+        LOG.info("Trying to load config file from classloader: " + configFileName);
+        loadConfigsUsingClassLoader(configFileName);
+        LOG.info("Loaded config file from classloader: " + configFileName);
+      }
+    }
+  }
+
+  private List<String> getConfigFiles() {
+    List<String> configFiles = new ArrayList<>();
+    
+    String logfeederConfigFilesProperty = LogFeederUtil.getStringProperty("logfeeder.config.files");
+    LOG.info("logfeeder.config.files=" + logfeederConfigFilesProperty);
+    if (logfeederConfigFilesProperty != null) {
+      configFiles.addAll(Arrays.asList(logfeederConfigFilesProperty.split(",")));
+    }
+
+    String inputConfigDir = LogFeederUtil.getStringProperty("input_config_dir");
+    if (StringUtils.isNotEmpty(inputConfigDir)) {
+      File configDirFile = new File(inputConfigDir);
+      List<File> inputConfigFiles = FileUtil.getAllFileFromDir(configDirFile, "json", false);
+      for (File inputConfigFile : inputConfigFiles) {
+        configFiles.add(inputConfigFile.getAbsolutePath());
+      }
+    }
+    
+    if (CollectionUtils.isEmpty(configFiles)) {
+      String configFileProperty = LogFeederUtil.getStringProperty("config.file", "config.json");
+      configFiles.addAll(Arrays.asList(configFileProperty.split(",")));
+    }
+    
+    return configFiles;
+  }
+
+  private void loadConfigsUsingFile(File configFile) throws Exception {
+    try {
+      String configData = FileUtils.readFileToString(configFile, Charset.defaultCharset());
+      loadConfigs(configData);
+    } catch (Exception t) {
+      LOG.error("Error opening config file. configFilePath=" + configFile.getAbsolutePath());
+      throw t;
+    }
+  }
+
+  private void loadConfigsUsingClassLoader(String configFileName) throws Exception {
+    try (BufferedInputStream fis = (BufferedInputStream) this.getClass().getClassLoader().getResourceAsStream(configFileName)) {
+      String configData = IOUtils.toString(fis, Charset.defaultCharset());
+      loadConfigs(configData);
+    }
+  }
+  
+  @Override
+  public void loadInputConfigs(String serviceName, InputConfig inputConfig) throws Exception {
+    inputConfigList.clear();
+    filterConfigList.clear();
+    
+    inputConfigList.addAll(inputConfig.getInput());
+    filterConfigList.addAll(inputConfig.getFilter());
+    
+    if (simulateMode) {
+      InputSimulate.loadTypeToFilePath(inputConfigList);
+    } else {
+      loadInputs(serviceName);
+      loadFilters(serviceName);
+      assignOutputsToInputs(serviceName);
+      
+      inputManager.startInputs(serviceName);
+    }
+  }
+
+  @Override
+  public void removeInputs(String serviceName) {
+    inputManager.removeInputsForService(serviceName);
+  }
+
+  @SuppressWarnings("unchecked")
+  public void loadConfigs(String configData) throws Exception {
+    Type type = new TypeToken<Map<String, Object>>() {}.getType();
+    Map<String, Object> configMap = LogFeederUtil.getGson().fromJson(configData, type);
+
+    // Get the globals
+    for (String key : configMap.keySet()) {
+      switch (key) {
+        case "global" :
+          globalConfigs.putAll((Map<String, Object>) configMap.get(key));
+          globalConfigJsons.add(configData);
+          break;
+        case "output" :
+          List<Map<String, Object>> outputConfig = (List<Map<String, Object>>) configMap.get(key);
+          outputConfigList.addAll(outputConfig);
+          break;
+        default :
+          LOG.warn("Unknown config key: " + key);
+      }
+    }
+  }
+  
+  @Override
+  public List<String> getGlobalConfigJsons() {
+    return globalConfigJsons;
+  }
+  
+  private void simulateIfNeeded() throws Exception {
+    int simulatedInputNumber = LogFeederUtil.getIntProperty("logfeeder.simulate.input_number", 0);
+    if (simulatedInputNumber == 0)
+      return;
+    
+    InputConfigImpl simulateInputConfig = new InputConfigImpl();
+    List<InputDescriptorImpl> inputConfigDescriptors = new ArrayList<>();
+    simulateInputConfig.setInput(inputConfigDescriptors);
+    simulateInputConfig.setFilter(new ArrayList<FilterDescriptorImpl>());
+    for (int i = 0; i < simulatedInputNumber; i++) {
+      InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+      inputDescriptor.setSource("simulate");
+      inputDescriptor.setRowtype("service");
+      inputDescriptor.setAddFields(new HashMap<String, String>());
+      inputConfigDescriptors.add(inputDescriptor);
+    }
+    
+    loadInputConfigs("Simulation", simulateInputConfig);
+    
+    simulateMode = true;
+  }
+
+  private void loadOutputs() {
+    for (Map<String, Object> map : outputConfigList) {
+      if (map == null) {
+        continue;
+      }
+      mergeBlocks(globalConfigs, map);
+
+      String value = (String) map.get("destination");
+      if (StringUtils.isEmpty(value)) {
+        LOG.error("Output block doesn't have destination element");
+        continue;
+      }
+      Output output = (Output) AliasUtil.getClassInstance(value, AliasType.OUTPUT);
+      if (output == null) {
+        LOG.error("Output object could not be found");
+        continue;
+      }
+      output.setDestination(value);
+      output.loadConfig(map);
+
+      // We only check is_enabled here; whether this output is enabled for a particular input is checked when outputs are assigned to inputs
+      if (output.isEnabled()) {
+        output.logConfigs(Level.INFO);
+        outputManager.add(output);
+      } else {
+        LOG.info("Output is disabled. So ignoring it. " + output.getShortDescription());
+      }
+    }
+  }
+
+  private void loadInputs(String serviceName) {
+    for (InputDescriptor inputDescriptor : inputConfigList) {
+      if (inputDescriptor == null) {
+        continue;
+      }
+
+      String source = inputDescriptor.getSource();
+      if (StringUtils.isEmpty(source)) {
+        LOG.error("Input block doesn't have source element");
+        continue;
+      }
+      Input input = (Input) AliasUtil.getClassInstance(source, AliasType.INPUT);
+      if (input == null) {
+        LOG.error("Input object could not be found");
+        continue;
+      }
+      input.setType(source);
+      input.loadConfig(inputDescriptor);
+
+      if (input.isEnabled()) {
+        input.setOutputManager(outputManager);
+        input.setInputManager(inputManager);
+        inputManager.add(serviceName, input);
+        input.logConfigs(Level.INFO);
+      } else {
+        LOG.info("Input is disabled. So ignoring it. " + input.getShortDescription());
+      }
+    }
+  }
+
+  private void loadFilters(String serviceName) {
+    sortFilters();
+
+    List<Input> toRemoveInputList = new ArrayList<Input>();
+    for (Input input : inputManager.getInputList(serviceName)) {
+      for (FilterDescriptor filterDescriptor : filterConfigList) {
+        if (filterDescriptor == null) {
+          continue;
+        }
+        if (BooleanUtils.isFalse(filterDescriptor.isEnabled())) {
+          LOG.debug("Ignoring filter " + filterDescriptor.getFilter() + " because it is disabled");
+          continue;
+        }
+        if (!input.isFilterRequired(filterDescriptor)) {
+          LOG.debug("Ignoring filter " + filterDescriptor.getFilter() + " for input " + input.getShortDescription());
+          continue;
+        }
+
+        String value = filterDescriptor.getFilter();
+        if (StringUtils.isEmpty(value)) {
+          LOG.error("Filter block doesn't have filter element");
+          continue;
+        }
+        Filter filter = (Filter) AliasUtil.getClassInstance(value, AliasType.FILTER);
+        if (filter == null) {
+          LOG.error("Filter object could not be found");
+          continue;
+        }
+        filter.loadConfig(filterDescriptor);
+        filter.setInput(input);
+
+        filter.setOutputManager(outputManager);
+        input.addFilter(filter);
+        filter.logConfigs(Level.INFO);
+      }
+      
+      if (input.getFirstFilter() == null) {
+        toRemoveInputList.add(input);
+      }
+    }
+
+    for (Input toRemoveInput : toRemoveInputList) {
+      LOG.warn("There are no filters, we will ignore this input. " + toRemoveInput.getShortDescription());
+      inputManager.removeInput(toRemoveInput);
+    }
+  }
+
+  private void sortFilters() {
+    Collections.sort(filterConfigList, new Comparator<FilterDescriptor>() {
+      @Override
+      public int compare(FilterDescriptor o1, FilterDescriptor o2) {
+        Integer o1Sort = o1.getSortOrder();
+        Integer o2Sort = o2.getSortOrder();
+        if (o1Sort == null || o2Sort == null) {
+          return 0;
+        }
+        
+        return o1Sort - o2Sort;
+      }
+    });
+  }
+
+  private void assignOutputsToInputs(String serviceName) {
+    Set<Output> usedOutputSet = new HashSet<Output>();
+    for (Input input : inputManager.getInputList(serviceName)) {
+      for (Output output : outputManager.getOutputs()) {
+        if (input.isOutputRequired(output)) {
+          usedOutputSet.add(output);
+          input.addOutput(output);
+        }
+      }
+    }
+    
+    // In case of simulation, a copy of the output is added for each simulated instance; these copies must also be added to the manager
+    for (Output output : InputSimulate.getSimulateOutputs()) {
+      outputManager.add(output);
+      usedOutputSet.add(output);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void mergeBlocks(Map<String, Object> fromMap, Map<String, Object> toMap) {
+    for (String key : fromMap.keySet()) {
+      Object objValue = fromMap.get(key);
+      if (objValue == null) {
+        continue;
+      }
+      if (objValue instanceof Map) {
+        Map<String, Object> globalFields = LogFeederUtil.cloneObject((Map<String, Object>) objValue);
+
+        Map<String, Object> localFields = (Map<String, Object>) toMap.get(key);
+        if (localFields == null) {
+          localFields = new HashMap<String, Object>();
+          toMap.put(key, localFields);
+        }
+
+        if (globalFields != null) {
+          for (String fieldKey : globalFields.keySet()) {
+            if (!localFields.containsKey(fieldKey)) {
+              localFields.put(fieldKey, globalFields.get(fieldKey));
+            }
+          }
+        }
+      }
+    }
+
+    // Let's add the rest of the top level fields if missing
+    for (String key : fromMap.keySet()) {
+      if (!toMap.containsKey(key)) {
+        toMap.put(key, fromMap.get(key));
+      }
+    }
+  }
+
+  public void cleanCheckPointFiles() {
+    inputManager.cleanCheckPointFiles();
+  }
+
+  public void logStats() {
+    inputManager.logStats();
+    outputManager.logStats();
+  }
+  
+  public void addMetrics(List<MetricData> metricsList) {
+    inputManager.addMetricsContainers(metricsList);
+    outputManager.addMetricsContainers(metricsList);
+  }
+
+  public void waitOnAllInputs() {
+    inputManager.waitOnAllInputs();
+  }
+
+  public void close() {
+    inputManager.close();
+    outputManager.close();
+    inputManager.checkInAll();
+  }
+}
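mergeBlocks() above gives local values precedence and copies only the missing fields from the global configuration, descending one nested-map level. Since the method is private, here is a small standalone illustration of that precedence with made-up field names; the expected outcome is shown in comments:

import java.util.HashMap;
import java.util.Map;

public class MergeBlocksIllustration {
  public static void main(String[] args) {
    Map<String, Object> global = new HashMap<>();
    global.put("destination", "solr");
    Map<String, Object> globalAddFields = new HashMap<>();
    globalAddFields.put("cluster", "cl1");
    globalAddFields.put("level", "INFO");
    global.put("add_fields", globalAddFields);

    Map<String, Object> local = new HashMap<>();
    Map<String, Object> localAddFields = new HashMap<>();
    localAddFields.put("level", "DEBUG");
    local.put("add_fields", localAddFields);

    // After mergeBlocks(global, local) the local block would contain:
    //   destination -> "solr"                      (top-level field copied from global)
    //   add_fields  -> {level=DEBUG, cluster=cl1}  (level kept from local, cluster filled in)
  }
}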
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java
new file mode 100644
index 0000000..5c20a8e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/common/ConfigItem.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.common;
+
+import java.util.List;
+
+import org.apache.ambari.logfeeder.metrics.MetricData;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.log4j.Logger;
+import org.apache.log4j.Priority;
+
+public abstract class ConfigItem {
+
+  protected static final Logger LOG = Logger.getLogger(ConfigItem.class);
+  private boolean drain = false;
+  public MetricData statMetric = new MetricData(getStatMetricName(), false);
+
+  public ConfigItem() {
+    super();
+  }
+
+  protected String getStatMetricName() {
+    return null;
+  }
+
+  /**
+   * Used while logging. Keep it short and meaningful
+   */
+  public abstract String getShortDescription();
+
+  /**
+   * Every implementor needs to give a name to the threads it creates
+   */
+  public String getNameForThread() {
+    return this.getClass().getSimpleName();
+  }
+
+  public void addMetricsContainers(List<MetricData> metricsList) {
+    metricsList.add(statMetric);
+  }
+
+  /**
+   * This method should be overridden by derived classes.
+   */
+  public void init() throws Exception {
+  }
+
+  public abstract boolean isEnabled();
+
+  public void incrementStat(int count) {
+    statMetric.value += count;
+  }
+
+  public void logStatForMetric(MetricData metric, String prefixStr) {
+    LogFeederUtil.logStatForMetric(metric, prefixStr, ", key=" + getShortDescription());
+  }
+
+  public synchronized void logStat() {
+    logStatForMetric(statMetric, "Stat");
+  }
+
+  public boolean logConfigs(Priority level) {
+    if (level.toInt() == Priority.INFO_INT && !LOG.isInfoEnabled()) {
+      return false;
+    }
+    if (level.toInt() == Priority.DEBUG_INT && !LOG.isDebugEnabled()) {
+      return false;
+    }
+    return true;
+  }
+
+  public boolean isDrain() {
+    return drain;
+  }
+
+  public void setDrain(boolean drain) {
+    this.drain = drain;
+  }
+
+}
\ No newline at end of file
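ConfigItem.logConfigs(Priority) above only reports whether the requested level is enabled; subclasses such as ConfigBlock call super.logConfigs(level) first and return early on false before building any log output. A minimal sketch of that contract with a hypothetical subclass (assumes ConfigItem is on the classpath):

import org.apache.ambari.logfeeder.common.ConfigItem;
import org.apache.log4j.Priority;

// Hypothetical subclass, only meant to illustrate the logConfigs() contract.
public class ExampleConfigItem extends ConfigItem {
  @Override
  public String getShortDescription() {
    return "example";
  }

  @Override
  public boolean isEnabled() {
    return true;
  }

  @Override
  public boolean logConfigs(Priority level) {
    if (!super.logConfigs(level)) {
      return false; // requested level is disabled, skip building the messages
    }
    LOG.log(level, "Printing configuration Item=" + getShortDescription());
    return true;
  }
}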
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
index afd903e..fd02497 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/Filter.java
@@ -24,7 +24,7 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.ambari.logfeeder.common.ConfigBlock;
+import org.apache.ambari.logfeeder.common.ConfigItem;
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
@@ -33,18 +33,28 @@
 import org.apache.ambari.logfeeder.output.OutputManager;
 import org.apache.ambari.logfeeder.util.AliasUtil;
 import org.apache.ambari.logfeeder.util.AliasUtil.AliasType;
-import org.apache.log4j.Logger;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.log4j.Priority;
 
-public abstract class Filter extends ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(Filter.class);
-
+public abstract class Filter extends ConfigItem {
+  protected FilterDescriptor filterDescriptor;
   protected Input input;
   private Filter nextFilter = null;
   private OutputManager outputManager;
 
   private Map<String, List<Mapper>> postFieldValueMappers = new HashMap<String, List<Mapper>>();
 
+  public void loadConfig(FilterDescriptor filterDescriptor) {
+    this.filterDescriptor = filterDescriptor;
+  }
+
+  public FilterDescriptor getFilterDescriptor() {
+    return filterDescriptor;
+  }
+
   @Override
   public void init() throws Exception {
     super.init();
@@ -55,28 +65,22 @@
     }
   }
 
-  @SuppressWarnings("unchecked")
   private void initializePostMapValues() {
-    Map<String, Object> postMapValues = (Map<String, Object>) getConfigValue("post_map_values");
+    Map<String, ? extends List<? extends PostMapValues>> postMapValues = filterDescriptor.getPostMapValues();
     if (postMapValues == null) {
       return;
     }
     for (String fieldName : postMapValues.keySet()) {
-      List<Map<String, Object>> mapList = null;
-      Object values = postMapValues.get(fieldName);
-      if (values instanceof List<?>) {
-        mapList = (List<Map<String, Object>>) values;
-      } else {
-        mapList = new ArrayList<Map<String, Object>>();
-        mapList.add((Map<String, Object>) values);
-      }
-      for (Map<String, Object> mapObject : mapList) {
-        for (String mapClassCode : mapObject.keySet()) {
+      List<? extends PostMapValues> values = postMapValues.get(fieldName);
+      for (PostMapValues pmv : values) {
+        for (MapFieldDescriptor mapFieldDescriptor : pmv.getMappers()) {
+          String mapClassCode = mapFieldDescriptor.getJsonName();
           Mapper mapper = (Mapper) AliasUtil.getClassInstance(mapClassCode, AliasType.MAPPER);
           if (mapper == null) {
-            break;
+            LOG.warn("Unknown mapper type: " + mapClassCode);
+            continue;
           }
-          if (mapper.init(getInput().getShortDescription(), fieldName, mapClassCode, mapObject.get(mapClassCode))) {
+          if (mapper.init(getInput().getShortDescription(), fieldName, mapClassCode, mapFieldDescriptor)) {
             List<Mapper> fieldMapList = postFieldValueMappers.get(fieldName);
             if (fieldMapList == null) {
               fieldMapList = new ArrayList<Mapper>();
@@ -156,15 +160,8 @@
   }
 
   @Override
-  public boolean isFieldConditionMatch(String fieldName, String stringValue) {
-    if (!super.isFieldConditionMatch(fieldName, stringValue)) {
-      if (input != null) {
-        return input.isFieldConditionMatch(fieldName, stringValue);
-      } else {
-        return false;
-      }
-    }
-    return true;
+  public boolean isEnabled() {
+    return BooleanUtils.isNotFalse(filterDescriptor.isEnabled());
   }
 
   @Override
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
index 7e2da70..70aea65 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterGrok.java
@@ -38,6 +38,8 @@
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.commons.lang3.BooleanUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -75,11 +77,10 @@
     super.init();
 
     try {
-      messagePattern = escapePattern(getStringValue("message_pattern"));
-      multilinePattern = escapePattern(getStringValue("multiline_pattern"));
-      sourceField = getStringValue("source_field");
-      removeSourceField = getBooleanValue("remove_source_field",
-        removeSourceField);
+      messagePattern = escapePattern(((FilterGrokDescriptor)filterDescriptor).getMessagePattern());
+      multilinePattern = escapePattern(((FilterGrokDescriptor)filterDescriptor).getMultilinePattern());
+      sourceField = ((FilterGrokDescriptor)filterDescriptor).getSourceField();
+      removeSourceField = BooleanUtils.toBooleanDefaultIfNull(filterDescriptor.isRemoveSourceField(), removeSourceField);
 
       LOG.info("init() done. grokPattern=" + messagePattern + ", multilinePattern=" + multilinePattern + ", " +
       getShortDescription());
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
index 35f692e..cfccdeb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterJSON.java
@@ -25,12 +25,9 @@
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.util.DateUtil;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
 
 public class FilterJSON extends Filter {
   
-  private static final Logger LOG  = Logger.getLogger(FilterJSON.class);
-
   @Override
   public void apply(String inputStr, InputMarker inputMarker) throws LogfeederException {
     Map<String, Object> jsonMap = null;
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
index b04a439..f2a4186 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/filter/FilterKeyValue.java
@@ -28,13 +28,11 @@
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 public class FilterKeyValue extends Filter {
-  private static final Logger LOG = Logger.getLogger(FilterKeyValue.class);
-
   private String sourceField = null;
   private String valueSplit = "=";
   private String fieldSplit = "\t";
@@ -46,10 +44,10 @@
   public void init() throws Exception {
     super.init();
 
-    sourceField = getStringValue("source_field");
-    valueSplit = getStringValue("value_split", valueSplit);
-    fieldSplit = getStringValue("field_split", fieldSplit);
-    valueBorders = getStringValue("value_borders");
+    sourceField = filterDescriptor.getSourceField();
+    valueSplit = StringUtils.defaultString(((FilterKeyValueDescriptor)filterDescriptor).getValueSplit(), valueSplit);
+    fieldSplit = StringUtils.defaultString(((FilterKeyValueDescriptor)filterDescriptor).getFieldSplit(), fieldSplit);
+    valueBorders = ((FilterKeyValueDescriptor)filterDescriptor).getValueBorders();
 
     LOG.info("init() done. source_field=" + sourceField + ", value_split=" + valueSplit + ", " + ", field_split=" +
         fieldSplit + ", " + getShortDescription());
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
index 41a1fa5..cfa1903 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/AbstractInputFile.java
@@ -29,14 +29,14 @@
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.commons.lang.ObjectUtils;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 public abstract class AbstractInputFile extends Input {
-  protected static final Logger LOG = Logger.getLogger(AbstractInputFile.class);
-
   private static final int DEFAULT_CHECKPOINT_INTERVAL_MS = 5 * 1000;
 
   protected File[] logFiles;
@@ -73,16 +73,16 @@
 
     // Let's close the file and set it to true after we start monitoring it
     setClosed(true);
-    logPath = getStringValue("path");
-    tail = getBooleanValue("tail", tail);
-    checkPointIntervalMS = getIntValue("checkpoint.interval.ms", DEFAULT_CHECKPOINT_INTERVAL_MS);
+    logPath = inputDescriptor.getPath();
+    tail = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isTail(), tail);
+    checkPointIntervalMS = (int) ObjectUtils.defaultIfNull(((InputFileBaseDescriptor)inputDescriptor).getCheckpointIntervalMs(), DEFAULT_CHECKPOINT_INTERVAL_MS);
 
     if (StringUtils.isEmpty(logPath)) {
       LOG.error("path is empty for file input. " + getShortDescription());
       return;
     }
 
-    String startPosition = getStringValue("start_position");
+    String startPosition = inputDescriptor.getStartPosition();
     if (StringUtils.isEmpty(startPosition) || startPosition.equalsIgnoreCase("beginning") ||
         startPosition.equalsIgnoreCase("begining") || !tail) {
       isStartFromBegining = true;
@@ -313,7 +313,7 @@
 
   @Override
   public String getShortDescription() {
-    return "input:source=" + getStringValue("source") + ", path=" +
+    return "input:source=" + inputDescriptor.getSource() + ", path=" +
         (!ArrayUtils.isEmpty(logFiles) ? logFiles[0].getAbsolutePath() : logPath);
   }
 }
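A recurring pattern in this refactoring, visible in init() above, replaces string-keyed lookups such as getBooleanValue("tail", ...) with typed descriptor getters whose null results fall back to defaults through commons-lang helpers. A standalone sketch of the two helpers used here; the descriptor values are made up:

import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.ObjectUtils;

public class DescriptorDefaultsSketch {
  public static void main(String[] args) {
    Boolean tailFromDescriptor = null;       // descriptor did not set "tail"
    Integer intervalFromDescriptor = null;   // descriptor did not set the interval

    // null falls back to the supplied default; a non-null value is used as-is
    boolean tail = BooleanUtils.toBooleanDefaultIfNull(tailFromDescriptor, true);
    int checkPointIntervalMS = (int) ObjectUtils.defaultIfNull(intervalFromDescriptor, 5000);

    System.out.println(tail + " " + checkPointIntervalMS); // prints: true 5000
  }
}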
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
index 9f54d8a..fba596d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/Input.java
@@ -21,23 +21,25 @@
 
 import java.io.File;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.input.cache.LRUCache;
-import org.apache.ambari.logfeeder.common.ConfigBlock;
+import org.apache.ambari.logfeeder.common.ConfigItem;
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.output.Output;
 import org.apache.ambari.logfeeder.output.OutputManager;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.log4j.Priority;
 
-public abstract class Input extends ConfigBlock implements Runnable {
-  private static final Logger LOG = Logger.getLogger(Input.class);
-
+public abstract class Input extends ConfigItem implements Runnable {
   private static final boolean DEFAULT_TAIL = true;
   private static final boolean DEFAULT_USE_EVENT_MD5 = false;
   private static final boolean DEFAULT_GEN_EVENT_MD5 = true;
@@ -47,12 +49,8 @@
   private static final long DEFAULT_CACHE_DEDUP_INTERVAL = 1000;
   private static final String DEFAULT_CACHE_KEY_FIELD = "log_message";
 
-  private static final String CACHE_ENABLED = "cache_enabled";
-  private static final String CACHE_KEY_FIELD = "cache_key_field";
-  private static final String CACHE_LAST_DEDUP_ENABLED = "cache_last_dedup_enabled";
-  private static final String CACHE_SIZE = "cache_size";
-  private static final String CACHE_DEDUP_INTERVAL = "cache_dedup_interval";
-
+  protected InputDescriptor inputDescriptor;
+  
   protected InputManager inputManager;
   protected OutputManager outputManager;
   private List<Output> outputList = new ArrayList<Output>();
@@ -75,21 +73,12 @@
     return null;
   }
   
-  @Override
-  public void loadConfig(Map<String, Object> map) {
-    super.loadConfig(map);
-    String typeValue = getStringValue("type");
-    if (typeValue != null) {
-      // Explicitly add type and value to field list
-      contextFields.put("type", typeValue);
-      @SuppressWarnings("unchecked")
-      Map<String, Object> addFields = (Map<String, Object>) map.get("add_fields");
-      if (addFields == null) {
-        addFields = new HashMap<String, Object>();
-        map.put("add_fields", addFields);
-      }
-      addFields.put("type", typeValue);
-    }
+  public void loadConfig(InputDescriptor inputDescriptor) {
+    this.inputDescriptor = inputDescriptor;
+  }
+
+  public InputDescriptor getInputDescriptor() {
+    return inputDescriptor;
   }
 
   public void setType(String type) {
@@ -104,6 +93,12 @@
     this.outputManager = outputManager;
   }
 
+  public boolean isFilterRequired(FilterDescriptor filterDescriptor) {
+    Conditions conditions = filterDescriptor.getConditions();
+    Fields fields = conditions.getFields();
+    return fields.getType().contains(inputDescriptor.getType());
+  }
+
   public void addFilter(Filter filter) {
     if (firstFilter == null) {
       firstFilter = filter;
@@ -116,6 +111,22 @@
     }
   }
 
+  @SuppressWarnings("unchecked")
+  public boolean isOutputRequired(Output output) {
+    Map<String, Object> conditions = (Map<String, Object>) output.getConfigs().get("conditions");
+    if (conditions == null) {
+      return false;
+    }
+    
+    Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
+    if (fields == null) {
+      return false;
+    }
+    
+    List<String> types = (List<String>) fields.get("rowtype");
+    return types != null && types.contains(inputDescriptor.getRowtype());
+  }
+
   public void addOutput(Output output) {
     outputList.add(output);
   }
@@ -124,9 +135,9 @@
   public void init() throws Exception {
     super.init();
     initCache();
-    tail = getBooleanValue("tail", DEFAULT_TAIL);
-    useEventMD5 = getBooleanValue("use_event_md5_as_id", DEFAULT_USE_EVENT_MD5);
-    genEventMD5 = getBooleanValue("gen_event_md5", DEFAULT_GEN_EVENT_MD5);
+    tail = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isTail(), DEFAULT_TAIL);
+    useEventMD5 = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isUseEventMd5AsId(), DEFAULT_USE_EVENT_MD5);
+    genEventMD5 = BooleanUtils.toBooleanDefaultIfNull(inputDescriptor.isGenEventMd5(), DEFAULT_GEN_EVENT_MD5);
 
     if (firstFilter != null) {
       firstFilter.init();
@@ -236,26 +247,26 @@
   }
 
   private void initCache() {
-    boolean cacheEnabled = getConfigValue(CACHE_ENABLED) != null
-      ? getBooleanValue(CACHE_ENABLED, DEFAULT_CACHE_ENABLED)
+    boolean cacheEnabled = inputDescriptor.isCacheEnabled() != null
+      ? inputDescriptor.isCacheEnabled()
       : LogFeederUtil.getBooleanProperty("logfeeder.cache.enabled", DEFAULT_CACHE_ENABLED);
     if (cacheEnabled) {
-      String cacheKeyField = getConfigValue(CACHE_KEY_FIELD) != null
-        ? getStringValue(CACHE_KEY_FIELD)
+      String cacheKeyField = inputDescriptor.getCacheKeyField() != null
+        ? inputDescriptor.getCacheKeyField()
         : LogFeederUtil.getStringProperty("logfeeder.cache.key.field", DEFAULT_CACHE_KEY_FIELD);
 
-      setCacheKeyField(getStringValue(cacheKeyField));
+      setCacheKeyField(cacheKeyField);
 
-      boolean cacheLastDedupEnabled = getConfigValue(CACHE_LAST_DEDUP_ENABLED) != null
-        ? getBooleanValue(CACHE_LAST_DEDUP_ENABLED, DEFAULT_CACHE_DEDUP_LAST)
+      boolean cacheLastDedupEnabled = inputDescriptor.getCacheLastDedupEnabled() != null
+        ? inputDescriptor.getCacheLastDedupEnabled()
         : LogFeederUtil.getBooleanProperty("logfeeder.cache.last.dedup.enabled", DEFAULT_CACHE_DEDUP_LAST);
 
-      int cacheSize = getConfigValue(CACHE_SIZE) != null
-        ? getIntValue(CACHE_SIZE, DEFAULT_CACHE_SIZE)
+      int cacheSize = inputDescriptor.getCacheSize() != null
+        ? inputDescriptor.getCacheSize()
         : LogFeederUtil.getIntProperty("logfeeder.cache.size", DEFAULT_CACHE_SIZE);
 
-      long cacheDedupInterval = getConfigValue(CACHE_DEDUP_INTERVAL) != null
-        ? getLongValue(CACHE_DEDUP_INTERVAL, DEFAULT_CACHE_DEDUP_INTERVAL)
+      long cacheDedupInterval = inputDescriptor.getCacheDedupInterval() != null
+        ? inputDescriptor.getCacheDedupInterval()
         : Long.parseLong(LogFeederUtil.getStringProperty("logfeeder.cache.dedup.interval", String.valueOf(DEFAULT_CACHE_DEDUP_INTERVAL)));
 
       setCache(new LRUCache(cacheSize, filePath, cacheDedupInterval, cacheLastDedupEnabled));
@@ -319,6 +330,11 @@
   }
 
   @Override
+  public boolean isEnabled() {
+    return BooleanUtils.isNotFalse(inputDescriptor.isEnabled());
+  }
+
+  @Override
   public String getNameForThread() {
     if (filePath != null) {
       try {
@@ -331,7 +347,17 @@
   }
 
   @Override
+  public boolean logConfigs(Priority level) {
+    if (!super.logConfigs(level)) {
+      return false;
+    }
+    LOG.log(level, "Printing Input=" + getShortDescription());
+    LOG.log(level, "description=" + inputDescriptor.getPath());
+    return true;
+  }
+
+  @Override
   public String toString() {
     return getShortDescription();
   }
-}
+}
\ No newline at end of file
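Note on the descriptor-based defaults introduced above: BooleanUtils.toBooleanDefaultIfNull gives the same three-state semantics the old getBooleanValue(key, default) provided, with null meaning "not configured". A minimal, self-contained sketch of that pattern, assuming only commons-lang on the classpath; TailConfig is a hypothetical stand-in for the Boolean-returning getters on InputDescriptor, not the real interface:

import org.apache.commons.lang.BooleanUtils;

public class DescriptorDefaultsSketch {
  // Hypothetical stand-in for InputDescriptor.isTail(); null means "not configured".
  interface TailConfig {
    Boolean isTail();
  }

  private static final boolean DEFAULT_TAIL = true;

  public static void main(String[] args) {
    TailConfig unset = () -> null;
    TailConfig explicit = () -> Boolean.FALSE;

    // null falls back to the default; an explicit value wins.
    System.out.println(BooleanUtils.toBooleanDefaultIfNull(unset.isTail(), DEFAULT_TAIL));    // true
    System.out.println(BooleanUtils.toBooleanDefaultIfNull(explicit.isTail(), DEFAULT_TAIL)); // false
  }
}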
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
new file mode 100644
index 0000000..8aec690
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.input;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.log4j.Logger;
+
+import com.google.common.io.Files;
+
+public class InputConfigUploader extends Thread {
+  protected static final Logger LOG = Logger.getLogger(InputConfigUploader.class);
+
+  private static final long SLEEP_BETWEEN_CHECK = 2000;
+
+  private final File configDir;
+  private final FilenameFilter inputConfigFileFilter = new FilenameFilter() {
+    @Override
+    public boolean accept(File dir, String name) {
+      return name.startsWith("input.config-") && name.endsWith(".json");
+    }
+  };
+  private final Set<String> filesHandled = new HashSet<>();
+  private final Pattern serviceNamePattern = Pattern.compile("input\\.config-(.+)\\.json");
+  private final LogSearchConfig config;
+  private final String clusterName = LogFeederUtil.getStringProperty("cluster.name");
+  
+  public static void load(LogSearchConfig config) {
+    new InputConfigUploader(config).start();
+  }
+  
+  private InputConfigUploader(LogSearchConfig config) {
+    super("Input Config Loader");
+    setDaemon(true);
+    
+    this.configDir = new File(LogFeederUtil.getStringProperty("logfeeder.config.dir"));
+    this.config = config;
+  }
+  
+  @Override
+  public void run() {
+    while (true) {
+      File[] inputConfigFiles = configDir.listFiles(inputConfigFileFilter);
+      for (File inputConfigFile : inputConfigFiles) {
+        if (!filesHandled.contains(inputConfigFile.getAbsolutePath())) {
+          try {
+            Matcher m = serviceNamePattern.matcher(inputConfigFile.getName());
+            m.find();
+            String serviceName = m.group(1);
+            String inputConfig = Files.toString(inputConfigFile, Charset.defaultCharset());
+            
+            if (!config.inputConfigExists(clusterName, serviceName)) {
+              config.createInputConfig(clusterName, serviceName, inputConfig);
+            }
+            filesHandled.add(inputConfigFile.getAbsolutePath());
+          } catch (Exception e) {
+            LOG.warn("Error handling file " + inputConfigFile.getAbsolutePath(), e);
+          }
+        }
+      }
+      
+      try {
+        Thread.sleep(SLEEP_BETWEEN_CHECK);
+      } catch (InterruptedException e) {
+        LOG.debug("Interrupted during sleep", e);
+      }
+    }
+  }
+}
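The uploader above polls logfeeder.config.dir every two seconds and derives the service name from each file name before shipping the JSON through LogSearchConfig. A minimal sketch of just the name extraction, using the same pattern as the serviceNamePattern field above:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ServiceNameExtractionSketch {
  private static final Pattern SERVICE_NAME = Pattern.compile("input\\.config-(.+)\\.json");

  public static void main(String[] args) {
    Matcher m = SERVICE_NAME.matcher("input.config-hdfs.json");
    if (m.find()) {
      System.out.println(m.group(1)); // hdfs
    }
  }
}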
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
index 3737839..fc40ca4 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputFile.java
@@ -25,7 +25,9 @@
 
 import org.apache.ambari.logfeeder.input.reader.LogsearchReaderFactory;
 import org.apache.ambari.logfeeder.util.FileUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileDescriptor;
 import org.apache.commons.io.filefilter.WildcardFileFilter;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.solr.common.util.Base64;
 
@@ -62,7 +64,7 @@
 
   @Override
   void start() throws Exception {
-    boolean isProcessFile = getBooleanValue("process_file", true);
+    boolean isProcessFile = BooleanUtils.toBooleanDefaultIfNull(((InputFileDescriptor)inputDescriptor).getProcessFile(), true);
     if (isProcessFile) {
       if (tail) {
         processFile(logFiles[0]);
@@ -100,7 +102,7 @@
   }
 
   private void copyFiles(File[] files) {
-    boolean isCopyFile = getBooleanValue("copy_file", false);
+    boolean isCopyFile = BooleanUtils.toBooleanDefaultIfNull(((InputFileDescriptor)inputDescriptor).getCopyFile(), false);
     if (isCopyFile && files != null) {
       for (File file : files) {
         try {
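The (InputFileDescriptor) casts above rely on each Input subclass being wired with the matching descriptor subtype. A guarded sketch of that contract, using hypothetical stand-in types rather than the real logsearch-config-api interfaces:

public class DescriptorCastSketch {
  interface InputDescriptor {}                              // stand-in (assumption)
  interface InputFileDescriptor extends InputDescriptor {   // stand-in (assumption)
    Boolean getProcessFile();
  }

  // Mirrors the process_file default of true when nothing is configured.
  static boolean shouldProcessFile(InputDescriptor d) {
    if (d instanceof InputFileDescriptor) {
      Boolean processFile = ((InputFileDescriptor) d).getProcessFile();
      return processFile != null ? processFile : true;
    }
    return true;
  }
}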
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputManager.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputManager.java
index 8e70850..8c76785 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputManager.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputManager.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -46,101 +47,163 @@
   private static final String CHECKPOINT_SUBFOLDER_NAME = "logfeeder_checkpoints";
   public static final String DEFAULT_CHECKPOINT_EXTENSION = ".cp";
   
-  private List<Input> inputList = new ArrayList<Input>();
+  private Map<String, List<Input>> inputs = new HashMap<>();
   private Set<Input> notReadyList = new HashSet<Input>();
 
   private boolean isDrain = false;
-  private boolean isAnyInputTail = false;
-
-  private File checkPointFolderFile = null;
-
-  private MetricData filesCountMetric = new MetricData("input.files.count", true);
 
   private String checkPointExtension;
-  
-  private Thread inputIsReadyMonitor = null;
+  private File checkPointFolderFile;
 
-  public List<Input> getInputList() {
-    return inputList;
+  private MetricData filesCountMetric = new MetricData("input.files.count", true);
+  
+  private Thread inputIsReadyMonitor;
+
+  public List<Input> getInputList(String serviceName) {
+    return inputs.get(serviceName);
   }
 
-  public void add(Input input) {
+  public void add(String serviceName, Input input) {
+    List<Input> inputList = inputs.get(serviceName);
+    if (inputList == null) {
+      inputList = new ArrayList<>();
+      inputs.put(serviceName, inputList);
+    }
     inputList.add(input);
   }
 
+  public void removeInputsForService(String serviceName) {
+    List<Input> inputList = inputs.get(serviceName);
+    for (Input input : inputList) {
+      input.setDrain(true);
+    }
+    inputList.clear();
+    inputs.remove(serviceName);
+  }
+
   public void removeInput(Input input) {
     LOG.info("Trying to remove from inputList. " + input.getShortDescription());
-    Iterator<Input> iter = inputList.iterator();
-    while (iter.hasNext()) {
-      Input iterInput = iter.next();
-      if (iterInput.equals(input)) {
-        LOG.info("Removing Input from inputList. " + input.getShortDescription());
-        iter.remove();
+    for (List<Input> inputList : inputs.values()) {
+      Iterator<Input> iter = inputList.iterator();
+      while (iter.hasNext()) {
+        Input iterInput = iter.next();
+        if (iterInput.equals(input)) {
+          LOG.info("Removing Input from inputList. " + input.getShortDescription());
+          iter.remove();
+        }
       }
     }
   }
 
   private int getActiveFilesCount() {
     int count = 0;
-    for (Input input : inputList) {
-      if (input.isReady()) {
-        count++;
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        if (input.isReady()) {
+          count++;
+        }
       }
     }
     return count;
   }
 
   public void init() {
+    initCheckPointSettings();
+    startMonitorThread();
+  }
+  
+  private void initCheckPointSettings() {
     checkPointExtension = LogFeederUtil.getStringProperty("logfeeder.checkpoint.extension", DEFAULT_CHECKPOINT_EXTENSION);
-    for (Input input : inputList) {
+    LOG.info("Determining valid checkpoint folder");
+    boolean isCheckPointFolderValid = false;
+    // We need to keep track of the files we are reading.
+    String checkPointFolder = LogFeederUtil.getStringProperty("logfeeder.checkpoint.folder");
+    if (!StringUtils.isEmpty(checkPointFolder)) {
+      checkPointFolderFile = new File(checkPointFolder);
+      isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
+    }
+    if (!isCheckPointFolderValid) {
+      // Let's try home folder
+      String userHome = LogFeederUtil.getStringProperty("user.home");
+      if (userHome != null) {
+        checkPointFolderFile = new File(userHome, CHECKPOINT_SUBFOLDER_NAME);
+        LOG.info("Checking if home folder can be used for checkpoints. Folder=" + checkPointFolderFile);
+        isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
+      }
+    }
+    if (!isCheckPointFolderValid) {
+      // Let's use tmp folder
+      String tmpFolder = LogFeederUtil.getStringProperty("java.io.tmpdir");
+      if (tmpFolder == null) {
+        tmpFolder = "/tmp";
+      }
+      checkPointFolderFile = new File(tmpFolder, CHECKPOINT_SUBFOLDER_NAME);
+      LOG.info("Checking if tmps folder can be used for checkpoints. Folder=" + checkPointFolderFile);
+      isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
+      if (isCheckPointFolderValid) {
+        LOG.warn("Using tmp folder " + checkPointFolderFile + " to store check points. This is not recommended." +
+            "Please set logfeeder.checkpoint.folder property");
+      }
+    }
+    
+    if (isCheckPointFolderValid) {
+      LOG.info("Using folder " + checkPointFolderFile + " for storing checkpoints");
+    }
+  }
+
+  private void startMonitorThread() {
+    inputIsReadyMonitor = new Thread("InputIsReadyMonitor") {
+      @Override
+      public void run() {
+        LOG.info("Going to monitor for these missing files: " + notReadyList.toString());
+        while (true) {
+          if (isDrain) {
+            LOG.info("Exiting missing file monitor.");
+            break;
+          }
+          try {
+            Iterator<Input> iter = notReadyList.iterator();
+            while (iter.hasNext()) {
+              Input input = iter.next();
+              try {
+                if (input.isReady()) {
+                  input.monitor();
+                  iter.remove();
+                }
+              } catch (Throwable t) {
+                LOG.error("Error while enabling monitoring for input. " + input.getShortDescription());
+              }
+            }
+            Thread.sleep(30 * 1000);
+          } catch (Throwable t) {
+            // Ignore
+          }
+        }
+      }
+    };
+    
+    inputIsReadyMonitor.start();
+  }
+  
+  public void startInputs(String serviceName) {
+    for (Input input : inputs.get(serviceName)) {
       try {
         input.init();
-        if (input.isTail()) {
-          isAnyInputTail = true;
+        if (input.isReady()) {
+          input.monitor();
+        } else {
+          if (input.isTail()) {
+            LOG.info("Adding input to not ready list. Note, it is possible this component is not run on this host. " +
+                "So it might not be an issue. " + input.getShortDescription());
+            notReadyList.add(input);
+          } else {
+            LOG.info("Input is not ready, so going to ignore it " + input.getShortDescription());
+          }
         }
       } catch (Exception e) {
         LOG.error("Error initializing input. " + input.getShortDescription(), e);
       }
     }
-
-    if (isAnyInputTail) {
-      LOG.info("Determining valid checkpoint folder");
-      boolean isCheckPointFolderValid = false;
-      // We need to keep track of the files we are reading.
-      String checkPointFolder = LogFeederUtil.getStringProperty("logfeeder.checkpoint.folder");
-      if (!StringUtils.isEmpty(checkPointFolder)) {
-        checkPointFolderFile = new File(checkPointFolder);
-        isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
-      }
-      if (!isCheckPointFolderValid) {
-        // Let's try home folder
-        String userHome = LogFeederUtil.getStringProperty("user.home");
-        if (userHome != null) {
-          checkPointFolderFile = new File(userHome, CHECKPOINT_SUBFOLDER_NAME);
-          LOG.info("Checking if home folder can be used for checkpoints. Folder=" + checkPointFolderFile);
-          isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
-        }
-      }
-      if (!isCheckPointFolderValid) {
-        // Let's use tmp folder
-        String tmpFolder = LogFeederUtil.getStringProperty("java.io.tmpdir");
-        if (tmpFolder == null) {
-          tmpFolder = "/tmp";
-        }
-        checkPointFolderFile = new File(tmpFolder, CHECKPOINT_SUBFOLDER_NAME);
-        LOG.info("Checking if tmps folder can be used for checkpoints. Folder=" + checkPointFolderFile);
-        isCheckPointFolderValid = verifyCheckPointFolder(checkPointFolderFile);
-        if (isCheckPointFolderValid) {
-          LOG.warn("Using tmp folder " + checkPointFolderFile + " to store check points. This is not recommended." +
-              "Please set logfeeder.checkpoint.folder property");
-        }
-      }
-
-      if (isCheckPointFolderValid) {
-        LOG.info("Using folder " + checkPointFolderFile + " for storing checkpoints");
-      }
-    }
-
   }
 
   private boolean verifyCheckPointFolder(File folderPathFile) {
@@ -171,70 +234,25 @@
     return checkPointFolderFile;
   }
 
-  public void monitor() {
-    for (Input input : inputList) {
-      if (input.isReady()) {
-        input.monitor();
-      } else {
-        if (input.isTail()) {
-          LOG.info("Adding input to not ready list. Note, it is possible this component is not run on this host. " +
-              "So it might not be an issue. " + input.getShortDescription());
-          notReadyList.add(input);
-        } else {
-          LOG.info("Input is not ready, so going to ignore it " + input.getShortDescription());
-        }
-      }
-    }
-    // Start the monitoring thread if any file is in tail mode
-    if (isAnyInputTail) {
-       inputIsReadyMonitor = new Thread("InputIsReadyMonitor") {
-        @Override
-        public void run() {
-          LOG.info("Going to monitor for these missing files: " + notReadyList.toString());
-          while (true) {
-            if (isDrain) {
-              LOG.info("Exiting missing file monitor.");
-              break;
-            }
-            try {
-              Iterator<Input> iter = notReadyList.iterator();
-              while (iter.hasNext()) {
-                Input input = iter.next();
-                try {
-                  if (input.isReady()) {
-                    input.monitor();
-                    iter.remove();
-                  }
-                } catch (Throwable t) {
-                  LOG.error("Error while enabling monitoring for input. " + input.getShortDescription());
-                }
-              }
-              Thread.sleep(30 * 1000);
-            } catch (Throwable t) {
-              // Ignore
-            }
-          }
-        }
-      };
-      inputIsReadyMonitor.start();
-    }
-  }
-
   void addToNotReady(Input notReadyInput) {
     notReadyList.add(notReadyInput);
   }
 
   public void addMetricsContainers(List<MetricData> metricsList) {
-    for (Input input : inputList) {
-      input.addMetricsContainers(metricsList);
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        input.addMetricsContainers(metricsList);
+      }
     }
     filesCountMetric.value = getActiveFilesCount();
     metricsList.add(filesCountMetric);
   }
 
   public void logStats() {
-    for (Input input : inputList) {
-      input.logStat();
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        input.logStat();
+      }
     }
 
     filesCountMetric.value = getActiveFilesCount();
@@ -308,14 +326,16 @@
 
   public void waitOnAllInputs() {
     //wait on inputs
-    for (Input input : inputList) {
-      if (input != null) {
-        Thread inputThread = input.getThread();
-        if (inputThread != null) {
-          try {
-            inputThread.join();
-          } catch (InterruptedException e) {
-            // ignore
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        if (input != null) {
+          Thread inputThread = input.getThread();
+          if (inputThread != null) {
+            try {
+              inputThread.join();
+            } catch (InterruptedException e) {
+              // ignore
+            }
           }
         }
       }
@@ -332,17 +352,21 @@
   }
 
   public void checkInAll() {
-    for (Input input : inputList) {
-      input.lastCheckIn();
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        input.lastCheckIn();
+      }
     }
   }
 
   public void close() {
-    for (Input input : inputList) {
-      try {
-        input.setDrain(true);
-      } catch (Throwable t) {
-        LOG.error("Error while draining. input=" + input.getShortDescription(), t);
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        try {
+          input.setDrain(true);
+        } catch (Throwable t) {
+          LOG.error("Error while draining. input=" + input.getShortDescription(), t);
+        }
       }
     }
     isDrain = true;
@@ -352,14 +376,16 @@
     int waitTimeMS = 1000;
     for (int i = 0; i < iterations; i++) {
       boolean allClosed = true;
-      for (Input input : inputList) {
-        if (!input.isClosed()) {
-          try {
-            allClosed = false;
-            LOG.warn("Waiting for input to close. " + input.getShortDescription() + ", " + (iterations - i) + " more seconds");
-            Thread.sleep(waitTimeMS);
-          } catch (Throwable t) {
-            // Ignore
+      for (List<Input> inputList : inputs.values()) {
+        for (Input input : inputList) {
+          if (!input.isClosed()) {
+            try {
+              allClosed = false;
+              LOG.warn("Waiting for input to close. " + input.getShortDescription() + ", " + (iterations - i) + " more seconds");
+              Thread.sleep(waitTimeMS);
+            } catch (Throwable t) {
+              // Ignore
+            }
           }
         }
       }
@@ -370,9 +396,11 @@
     }
     
     LOG.warn("Some inputs were not closed after " + iterations + " iterations");
-    for (Input input : inputList) {
-      if (!input.isClosed()) {
-        LOG.warn("Input not closed. Will ignore it." + input.getShortDescription());
+    for (List<Input> inputList : inputs.values()) {
+      for (Input input : inputList) {
+        if (!input.isClosed()) {
+          LOG.warn("Input not closed. Will ignore it." + input.getShortDescription());
+        }
       }
     }
   }
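InputManager now keys inputs by service name so a service's inputs can be replaced at runtime when its input config changes. A minimal sketch of the registry semantics, with String standing in for Input:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ServiceInputRegistrySketch {
  private final Map<String, List<String>> inputs = new HashMap<>();

  public void add(String serviceName, String input) {
    inputs.computeIfAbsent(serviceName, k -> new ArrayList<>()).add(input);
  }

  // The real removeInputsForService drains each input before dropping the list.
  public List<String> removeInputsForService(String serviceName) {
    return inputs.remove(serviceName);
  }

  public static void main(String[] args) {
    ServiceInputRegistrySketch registry = new ServiceInputRegistrySketch();
    registry.add("hdfs", "hdfs_namenode");
    registry.add("hdfs", "hdfs_datanode");
    System.out.println(registry.removeInputsForService("hdfs")); // [hdfs_namenode, hdfs_datanode]
  }
}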
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
index f560379..4bf162b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputS3File.java
@@ -23,6 +23,7 @@
 import java.io.IOException;
 
 import org.apache.ambari.logfeeder.util.S3Util;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.solr.common.util.Base64;
 
@@ -78,8 +79,8 @@
 
   @Override
   protected BufferedReader openLogFile(File logPathFile) throws IOException {
-    String s3AccessKey = getStringValue("s3_access_key");
-    String s3SecretKey = getStringValue("s3_secret_key");
+    String s3AccessKey = ((InputS3FileDescriptor)inputDescriptor).getS3AccessKey();
+    String s3SecretKey = ((InputS3FileDescriptor)inputDescriptor).getS3SecretKey();
     BufferedReader br = S3Util.getReader(logPathFile.getPath(), s3AccessKey, s3SecretKey);
     fileKey = getFileKey(logPathFile);
     base64FileKey = Base64.byteArrayToBase64(fileKey.toString().getBytes());
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
index 2222f93..5e7bdb3 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputSimulate.java
@@ -21,7 +21,6 @@
 import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -35,22 +34,23 @@
 import org.apache.ambari.logfeeder.filter.FilterJSON;
 import org.apache.ambari.logfeeder.output.Output;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterJsonDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
+import org.apache.commons.collections.MapUtils;
 import org.apache.solr.common.util.Base64;
 
 import com.google.common.base.Joiner;
 
 public class InputSimulate extends Input {
-  private static final Logger LOG = Logger.getLogger(InputSimulate.class);
-
   private static final String LOG_TEXT_PATTERN = "{ logtime=\"%d\", level=\"%s\", log_message=\"%s\", host=\"%s\"}";
   
   private static final Map<String, String> typeToFilePath = new HashMap<>();
-  public static void loadTypeToFilePath(List<Map<String, Object>> inputList) {
-    for (Map<String, Object> input : inputList) {
-      if (input.containsKey("type") && input.containsKey("path")) {
-        typeToFilePath.put((String)input.get("type"), (String)input.get("path"));
-      }
+  private static final List<String> inputTypes = new ArrayList<>();
+  public static void loadTypeToFilePath(List<InputDescriptor> inputList) {
+    for (InputDescriptor input : inputList) {
+      typeToFilePath.put(input.getType(), input.getPath());
+      inputTypes.add(input.getType());
     }
   }
   
@@ -83,20 +83,16 @@
     this.host = "#" + hostNumber.incrementAndGet() + "-" + LogFeederUtil.hostName;
     
     Filter filter = new FilterJSON();
-    filter.loadConfig(Collections.<String, Object> emptyMap());
+    filter.loadConfig(new FilterJsonDescriptorImpl());
     filter.setInput(this);
     addFilter(filter);
   }
   
   private List<String> getSimulatedLogTypes() {
     String logsToSimulate = LogFeederUtil.getStringProperty("logfeeder.simulate.log_ids");
-    if (logsToSimulate == null) {
-      return new ArrayList<>(typeToFilePath.keySet());
-    } else {
-      List<String> simulatedLogTypes = Arrays.asList(logsToSimulate.split(","));
-      simulatedLogTypes.retainAll(typeToFilePath.keySet());
-      return simulatedLogTypes;
-    }
+    return (logsToSimulate == null) ?
+      inputTypes :
+      Arrays.asList(logsToSimulate.split(","));
   }
 
   @Override
@@ -120,11 +116,12 @@
 
   @Override
   void start() throws Exception {
-    if (types.isEmpty())
-      return;
-    
     getFirstFilter().setOutputManager(outputManager);
     while (true) {
+      if (types.isEmpty()) {
+        try { Thread.sleep(sleepMillis); } catch(Exception e) { /* Ignore */ }
+        continue;
+      }
       String type = imitateRandomLogFile();
       
       String line = getLine();
@@ -139,9 +136,9 @@
   private String imitateRandomLogFile() {
     int typePos = random.nextInt(types.size());
     String type = types.get(typePos);
-    String filePath = typeToFilePath.get(type);
+    String filePath = MapUtils.getString(typeToFilePath, type, "path of " + type);
     
-    configs.put("type", type);
+    ((InputDescriptorImpl)inputDescriptor).setType(type);
     setFilePath(filePath);
     
     return type;
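imitateRandomLogFile above now tolerates types without a known path by falling back to a placeholder via MapUtils.getString. A small sketch of that lookup behavior, assuming commons-collections on the classpath; the example path is hypothetical:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.collections.MapUtils;

public class SimulatePathLookupSketch {
  public static void main(String[] args) {
    Map<String, String> typeToFilePath = new HashMap<>();
    typeToFilePath.put("hdfs_namenode", "/var/log/hadoop/hdfs/namenode.log"); // hypothetical path

    // A known type resolves to its path; an unknown type gets the placeholder default.
    System.out.println(MapUtils.getString(typeToFilePath, "hdfs_namenode", "path of hdfs_namenode"));
    System.out.println(MapUtils.getString(typeToFilePath, "zookeeper", "path of zookeeper"));
  }
}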
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java
deleted file mode 100644
index a05a916..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.input.InputMarker;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
-
-/**
- * Read configuration from solr and filter the log
- */
-public enum FilterLogData {
-  INSTANCE;
-  
-  private static final Logger LOG = Logger.getLogger(FilterLogData.class);
-  
-  private static final boolean DEFAULT_VALUE = true;
-
-  public boolean isAllowed(String jsonBlock, InputMarker inputMarker) {
-    if (StringUtils.isEmpty(jsonBlock)) {
-      return DEFAULT_VALUE;
-    }
-    Map<String, Object> jsonObj = LogFeederUtil.toJSONObject(jsonBlock);
-    return isAllowed(jsonObj, inputMarker);
-  }
-
-  public boolean isAllowed(Map<String, Object> jsonObj, InputMarker inputMarker) {
-    if ("audit".equals(inputMarker.input.getConfigs().get(LogFeederConstants.ROW_TYPE)))
-      return true;
-    
-    boolean isAllowed = applyFilter(jsonObj);
-    if (!isAllowed) {
-      LOG.trace("Filter block the content :" + LogFeederUtil.getGson().toJson(jsonObj));
-    }
-    return isAllowed;
-  }
-  
-
-  private boolean applyFilter(Map<String, Object> jsonObj) {
-    if (MapUtils.isEmpty(jsonObj)) {
-      LOG.warn("Output jsonobj is empty");
-      return DEFAULT_VALUE;
-    }
-    
-    String hostName = (String) jsonObj.get(LogFeederConstants.SOLR_HOST);
-    String componentName = (String) jsonObj.get(LogFeederConstants.SOLR_COMPONENT);
-    String level = (String) jsonObj.get(LogFeederConstants.SOLR_LEVEL);
-    if (StringUtils.isNotBlank(hostName) && StringUtils.isNotBlank(componentName) && StringUtils.isNotBlank(level)) {
-      LogFeederFilter componentFilter = LogConfigHandler.findComponentFilter(componentName);
-      if (componentFilter == null) {
-        return DEFAULT_VALUE;
-      }
-      List<String> allowedLevels = LogConfigHandler.getAllowedLevels(hostName, componentFilter);
-      if (CollectionUtils.isEmpty(allowedLevels)) {
-        allowedLevels.add(LogFeederConstants.ALL);
-      }
-      return LogFeederUtil.isListContains(allowedLevels, level, false);
-    }
-    else {
-      return DEFAULT_VALUE;
-    }
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java
deleted file mode 100644
index 12c744c..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.SolrRequest.METHOD;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-
-public class LogConfigFetcher {
-  private static final Logger LOG = Logger.getLogger(LogConfigFetcher.class);
-  
-  private static LogConfigFetcher instance;
-  public synchronized static LogConfigFetcher getInstance() {
-    if (instance == null) {
-      try {
-        instance = new LogConfigFetcher();
-      } catch (Exception e) {
-        String logMessageKey = LogConfigFetcher.class.getSimpleName() + "_SOLR_UTIL";
-              LogFeederUtil.logErrorMessageByInterval(logMessageKey, "Error constructing solrUtil", e, LOG, Level.WARN);
-      }
-    }
-    return instance;
-  }
-
-  private SolrClient solrClient;
-
-  private String solrDetail = "";
-
-  public LogConfigFetcher() throws Exception {
-    String url = LogFeederUtil.getStringProperty("logfeeder.solr.url");
-    String zkConnectString = LogFeederUtil.getStringProperty("logfeeder.solr.zk_connect_string");
-    String collection = LogFeederUtil.getStringProperty("logfeeder.solr.core.config.name", "history");
-    connectToSolr(url, zkConnectString, collection);
-  }
-
-  private SolrClient connectToSolr(String url, String zkConnectString, String collection) throws Exception {
-    solrDetail = "zkConnectString=" + zkConnectString + ", collection=" + collection + ", url=" + url;
-
-    LOG.info("connectToSolr() " + solrDetail);
-    if (StringUtils.isEmpty(collection)) {
-      throw new Exception("For solr, collection name is mandatory. " + solrDetail);
-    }
-    
-    if (StringUtils.isEmpty(zkConnectString) && StringUtils.isBlank(url))
-      throw new Exception("Both zkConnectString and URL are empty. zkConnectString=" + zkConnectString + ", collection=" +
-          collection + ", url=" + url);
-    
-    if (StringUtils.isNotEmpty(zkConnectString)) {
-      solrDetail = "zkConnectString=" + zkConnectString + ", collection=" + collection;
-      LOG.info("Using zookeepr. " + solrDetail);
-      CloudSolrClient solrClouldClient = new CloudSolrClient(zkConnectString);
-      solrClouldClient.setDefaultCollection(collection);
-      solrClient = solrClouldClient;
-      checkSolrStatus(3 * 60 * 1000);
-    } else {
-      solrDetail = "collection=" + collection + ", url=" + url;
-      String collectionURL = url + "/" + collection;
-      LOG.info("Connecting to  solr : " + collectionURL);
-      solrClient = new HttpSolrClient(collectionURL);
-    }
-    return solrClient;
-  }
-
-  private boolean checkSolrStatus(int waitDurationMS) {
-    boolean status = false;
-    try {
-      long beginTimeMS = System.currentTimeMillis();
-      long waitIntervalMS = 2000;
-      int pingCount = 0;
-      while (true) {
-        pingCount++;
-        CollectionAdminResponse response = null;
-        try {
-          CollectionAdminRequest.List colListReq = new CollectionAdminRequest.List();
-          response = colListReq.process(solrClient);
-        } catch (Exception ex) {
-          LOG.error("Con't connect to Solr. solrDetail=" + solrDetail, ex);
-        }
-        if (response != null && response.getStatus() == 0) {
-          LOG.info("Solr getCollections() is success. solr=" + solrDetail);
-          status = true;
-          break;
-        }
-        if (System.currentTimeMillis() - beginTimeMS > waitDurationMS) {
-          LOG.error("Solr is not reachable even after " + (System.currentTimeMillis() - beginTimeMS)
-            + " ms. If you are using alias, then you might have to restart LogSearch after Solr is up and running. solr="
-            + solrDetail + ", response=" + response);
-          break;
-        } else {
-          LOG.warn("Solr is not reachable yet. getCollections() attempt count=" + pingCount + ". Will sleep for " +
-              waitIntervalMS + " ms and try again." + " solr=" + solrDetail + ", response=" + response);
-        }
-        Thread.sleep(waitIntervalMS);
-      }
-    } catch (Throwable t) {
-      LOG.error("Seems Solr is not up. solrDetail=" + solrDetail, t);
-    }
-    return status;
-  }
-
-  public Map<String, Object> getConfigDoc() {
-    HashMap<String, Object> configMap = new HashMap<String, Object>();
-    SolrQuery solrQuery = new SolrQuery();
-    solrQuery.setQuery("*:*");
-    String fq = LogFeederConstants.ROW_TYPE + ":" + LogFeederConstants.LOGFEEDER_FILTER_NAME;
-    solrQuery.setFilterQueries(fq);
-    try {
-      QueryResponse response = process(solrQuery);
-      if (response != null) {
-        SolrDocumentList documentList = response.getResults();
-        if (CollectionUtils.isNotEmpty(documentList)) {
-          SolrDocument configDoc = documentList.get(0);
-          String configJson = LogFeederUtil.getGson().toJson(configDoc);
-          configMap = (HashMap<String, Object>) LogFeederUtil.toJSONObject(configJson);
-        }
-      }
-    } catch (Exception e) {
-      String logMessageKey = this.getClass().getSimpleName() + "_FETCH_FILTER_CONFIG_ERROR";
-      LogFeederUtil.logErrorMessageByInterval(logMessageKey, "Error getting filter config from solr", e, LOG, Level.ERROR);
-    }
-    return configMap;
-  }
-
-  private QueryResponse process(SolrQuery solrQuery) throws SolrServerException, IOException, SolrException {
-    if (solrClient != null) {
-      QueryResponse queryResponse = solrClient.query(solrQuery, METHOD.POST);
-      return queryResponse;
-    } else {
-      LOG.error("solrClient can't be null");
-      return null;
-    }
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java
deleted file mode 100644
index 0ece637..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.log4j.Logger;
-
-public class LogConfigHandler extends Thread {
-  private static final Logger LOG = Logger.getLogger(LogConfigHandler.class);
-  
-  private static final int DEFAULT_SOLR_CONFIG_INTERVAL = 5;
-  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
-  private static final String TIMEZONE = "GMT";
-  private static final int RETRY_INTERVAL = 30;
-
-  static {
-    TimeZone.setDefault(TimeZone.getTimeZone(TIMEZONE));
-  }
-  
-  private static ThreadLocal<DateFormat> formatter = new ThreadLocal<DateFormat>() {
-    protected DateFormat initialValue() {
-      SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT);
-      dateFormat.setTimeZone(TimeZone.getTimeZone(TIMEZONE));
-      return dateFormat;
-    }
-  };
-  
-  private static boolean filterEnabled;
-  private static LogFeederFilterWrapper logFeederFilterWrapper;
-
-  private static boolean running = false;
-
-  public static void handleConfig() {
-    filterEnabled = LogFeederUtil.getBooleanProperty("logfeeder.log.filter.enable", false);
-    if (!filterEnabled) {
-      LOG.info("Logfeeder filter Scheduler is disabled.");
-      return;
-    }
-    if (!running) {
-      new LogConfigHandler().start();
-      running = true;
-      LOG.info("Logfeeder Filter Thread started!");
-    } else {
-      LOG.warn("Logfeeder Filter Thread is already running.");
-    }
-  }
-  
-  private LogConfigHandler() {
-    setName(getClass().getSimpleName());
-    setDaemon(true);
-  }
-
-  @Override
-  public void run() {
-    String zkConnectString = LogFeederUtil.getStringProperty("logfeeder.solr.zk_connect_string");
-    String solrUrl = LogFeederUtil.getStringProperty("logfeeder.solr.url");
-    if (StringUtils.isBlank(zkConnectString) && StringUtils.isBlank(solrUrl)) {
-      LOG.warn("Neither Solr ZK Connect String nor solr Url for UserConfig/History is set." +
-          "Won't look for level configuration from Solr.");
-      return;
-    }
-    
-    int solrConfigInterval = LogFeederUtil.getIntProperty("logfeeder.solr.config.interval", DEFAULT_SOLR_CONFIG_INTERVAL);
-    do {
-      LOG.debug("Updating config from solr after every " + solrConfigInterval + " sec.");
-      fetchConfig();
-      try {
-        Thread.sleep(1000 * solrConfigInterval);
-      } catch (InterruptedException e) {
-        LOG.error(e.getLocalizedMessage(), e.getCause());
-      }
-    } while (true);
-  }
-
-  private synchronized void fetchConfig() {
-    LogConfigFetcher fetcher = LogConfigFetcher.getInstance();
-    if (fetcher != null) {
-      Map<String, Object> configDocMap = fetcher.getConfigDoc();
-      String configJson = (String) configDocMap.get(LogFeederConstants.VALUES);
-      if (configJson != null) {
-        logFeederFilterWrapper = LogFeederUtil.getGson().fromJson(configJson, LogFeederFilterWrapper.class);
-      }
-    }
-  }
-
-  public static boolean isFilterAvailable() {
-    return logFeederFilterWrapper != null;
-  }
-
-  public static List<String> getAllowedLevels(String hostName, LogFeederFilter componentFilter) {
-    String componentName = componentFilter.getLabel();
-    List<String> hosts = componentFilter.getHosts();
-    List<String> defaultLevels = componentFilter.getDefaultLevels();
-    List<String> overrideLevels = componentFilter.getOverrideLevels();
-    String expiryTime = componentFilter.getExpiryTime();
-    
-    // check is user override or not
-    if (StringUtils.isNotEmpty(expiryTime) || CollectionUtils.isNotEmpty(overrideLevels) || CollectionUtils.isNotEmpty(hosts)) {
-      if (CollectionUtils.isEmpty(hosts)) { // hosts list is empty or null consider it apply on all hosts
-        hosts.add(LogFeederConstants.ALL);
-      }
-      
-      if (LogFeederUtil.isListContains(hosts, hostName, false)) {
-        if (isFilterExpired(componentFilter)) {
-          LOG.debug("Filter for component " + componentName + " and host :" + hostName + " is expired at " +
-              componentFilter.getExpiryTime());
-          return defaultLevels;
-        } else {
-          return overrideLevels;
-        }
-      }
-    }
-    return defaultLevels;
-  }
-
-  private static boolean isFilterExpired(LogFeederFilter logfeederFilter) {
-    if (logfeederFilter == null)
-      return false;
-    
-    Date filterEndDate = parseFilterExpireDate(logfeederFilter);
-    if (filterEndDate == null) {
-      return false;
-    }
-    
-    Date currentDate = new Date();
-    if (!currentDate.before(filterEndDate)) {
-      LOG.debug("Filter for  Component :" + logfeederFilter.getLabel() + " and Hosts : [" +
-          StringUtils.join(logfeederFilter.getHosts(), ',') + "] is expired because of filter endTime : " +
-          formatter.get().format(filterEndDate) + " is older than currentTime :" + formatter.get().format(currentDate));
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  private static Date parseFilterExpireDate(LogFeederFilter vLogfeederFilter) {
-    String expiryTime = vLogfeederFilter.getExpiryTime();
-    if (StringUtils.isNotEmpty(expiryTime)) {
-      try {
-        return formatter.get().parse(expiryTime);
-      } catch (ParseException e) {
-        LOG.error("Filter have invalid ExpiryTime : " + expiryTime + " for component :" + vLogfeederFilter.getLabel()
-          + " and hosts : [" + StringUtils.join(vLogfeederFilter.getHosts(), ',') + "]");
-      }
-    }
-    return null;
-  }
-  
-  public static LogFeederFilter findComponentFilter(String componentName) {
-    waitForFilter();
-    
-    if (logFeederFilterWrapper != null) {
-      HashMap<String, LogFeederFilter> filter = logFeederFilterWrapper.getFilter();
-      if (filter != null) {
-        LogFeederFilter componentFilter = filter.get(componentName);
-        if (componentFilter != null) {
-          return componentFilter;
-        }
-      }
-    }
-    LOG.trace("Filter is not there for component :" + componentName);
-    return null;
-  }
-
-  private static void waitForFilter() {
-    if (!filterEnabled || logFeederFilterWrapper != null) {
-      return;
-    }
-    
-    while (true) {
-      try {
-        Thread.sleep(RETRY_INTERVAL * 1000);
-      } catch (InterruptedException e) {
-        LOG.error(e);
-      }
-      
-      LOG.info("Checking if config is available");
-      if (logFeederFilterWrapper != null) {
-        LOG.info("Config is available");
-        return;
-      }
-    }
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java
deleted file mode 100644
index 60c8ae8..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.codehaus.jackson.annotate.JsonAutoDetect;
-import org.codehaus.jackson.annotate.JsonAutoDetect.Visibility;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-@JsonAutoDetect(getterVisibility = Visibility.NONE, setterVisibility = Visibility.NONE, fieldVisibility = Visibility.ANY)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class LogFeederFilter {
-
-  private String label;
-  private List<String> hosts;
-  private List<String> defaultLevels;
-  private List<String> overrideLevels;
-  private String expiryTime;
-
-  public LogFeederFilter() {
-    hosts = new ArrayList<String>();
-    defaultLevels = new ArrayList<String>();
-    overrideLevels = new ArrayList<String>();
-  }
-
-  public String getLabel() {
-    return label;
-  }
-
-  public void setLabel(String label) {
-    this.label = label;
-  }
-
-  public List<String> getHosts() {
-    return hosts;
-  }
-
-  public void setHosts(List<String> hosts) {
-    this.hosts = hosts;
-  }
-
-  public List<String> getDefaultLevels() {
-    return defaultLevels;
-  }
-
-  public void setDefaultLevels(List<String> defaultLevels) {
-    this.defaultLevels = defaultLevels;
-  }
-
-  public List<String> getOverrideLevels() {
-    return overrideLevels;
-  }
-
-  public void setOverrideLevels(List<String> overrideLevels) {
-    this.overrideLevels = overrideLevels;
-  }
-
-  public String getExpiryTime() {
-    return expiryTime;
-  }
-
-  public void setExpiryTime(String expiryTime) {
-    this.expiryTime = expiryTime;
-  }
-
-}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java
deleted file mode 100644
index 9199cd3..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.HashMap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.codehaus.jackson.annotate.JsonAutoDetect;
-import org.codehaus.jackson.annotate.JsonAutoDetect.Visibility;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-@JsonAutoDetect(getterVisibility = Visibility.NONE, setterVisibility = Visibility.NONE, fieldVisibility = Visibility.ANY)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class LogFeederFilterWrapper {
-
-  private HashMap<String, LogFeederFilter> filter;
-  private String id;
-
-  public HashMap<String, LogFeederFilter> getFilter() {
-    return filter;
-  }
-
-  public void setFilter(HashMap<String, LogFeederFilter> filter) {
-    this.filter = filter;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
new file mode 100644
index 0000000..6173f53
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.loglevelfilter;
+
+import java.util.Map;
+
+import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+
+public enum FilterLogData {
+  INSTANCE;
+  
+  private static final Logger LOG = Logger.getLogger(FilterLogData.class);
+  
+  private static final boolean DEFAULT_VALUE = true;
+
+  public boolean isAllowed(String jsonBlock, InputMarker inputMarker) {
+    if (StringUtils.isEmpty(jsonBlock)) {
+      return DEFAULT_VALUE;
+    }
+    Map<String, Object> jsonObj = LogFeederUtil.toJSONObject(jsonBlock);
+    return isAllowed(jsonObj, inputMarker);
+  }
+
+  public boolean isAllowed(Map<String, Object> jsonObj, InputMarker inputMarker) {
+    if ("audit".equals(inputMarker.input.getInputDescriptor().getRowtype()))
+      return true;
+    
+    boolean isAllowed = applyFilter(jsonObj);
+    if (!isAllowed) {
+      LOG.trace("Filter blocked the content: " + LogFeederUtil.getGson().toJson(jsonObj));
+    }
+    return isAllowed;
+  }
+  
+
+  private boolean applyFilter(Map<String, Object> jsonObj) {
+    if (MapUtils.isEmpty(jsonObj)) {
+      LOG.warn("Output JSON object is empty");
+      return DEFAULT_VALUE;
+    }
+    
+    String hostName = (String) jsonObj.get(LogFeederConstants.SOLR_HOST);
+    String logId = (String) jsonObj.get(LogFeederConstants.SOLR_COMPONENT);
+    String level = (String) jsonObj.get(LogFeederConstants.SOLR_LEVEL);
+    if (StringUtils.isNotBlank(hostName) && StringUtils.isNotBlank(logId) && StringUtils.isNotBlank(level)) {
+      return LogLevelFilterHandler.isAllowed(hostName, logId, level);
+    } else {
+      return DEFAULT_VALUE;
+    }
+  }
+}
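
Usage sketch (not part of this patch): FilterLogData is an enum singleton, so callers go through the shared INSTANCE rather than constructing a filter. The surrounding pipeline is assumed to supply the InputMarker and the raw JSON line.

    // Hypothetical caller in an output path; 'jsonLine' and 'inputMarker'
    // are assumed to come from the surrounding pipeline.
    Map<String, Object> jsonObj = LogFeederUtil.toJSONObject(jsonLine);
    if (FilterLogData.INSTANCE.isAllowed(jsonObj, inputMarker)) {
      // forward the entry to the configured outputs
    }
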
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java
new file mode 100644
index 0000000..8a4d953
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.loglevelfilter;
+
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
+
+public class LogLevelFilterHandler implements LogLevelFilterMonitor {
+  private static final Logger LOG = Logger.getLogger(LogLevelFilterHandler.class);
+  
+  private static final String TIMEZONE = "GMT";
+  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
+  
+  private static ThreadLocal<DateFormat> formatter = new ThreadLocal<DateFormat>() {
+    protected DateFormat initialValue() {
+      SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT);
+      dateFormat.setTimeZone(TimeZone.getTimeZone(TIMEZONE));
+      return dateFormat;
+    }
+  };
+  
+  private static LogSearchConfig config;
+  private static String clusterName = LogFeederUtil.getStringProperty("cluster.name");
+  private static boolean filterEnabled;
+  private static List<String> defaultLogLevels;
+  private static Map<String, LogLevelFilter> filters = new HashMap<>();
+
+  public static void init(LogSearchConfig config_) {
+    config = config_;
+    filterEnabled = LogFeederUtil.getBooleanProperty("logfeeder.log.filter.enable", false);
+    defaultLogLevels = Arrays.asList(LogFeederUtil.getStringProperty("logfeeder.include.default.level").split(","));
+    TimeZone.setDefault(TimeZone.getTimeZone(TIMEZONE));
+  }
+
+  @Override
+  public void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter) {
+    synchronized (LogLevelFilterHandler.class) {
+      filters.put(logId, logLevelFilter);
+    }
+  }
+
+  @Override
+  public void removeLogLevelFilter(String logId) {
+    synchronized (LogLevelFilterHandler.class) {
+      filters.remove(logId);
+    }
+  }
+
+  public static boolean isAllowed(String hostName, String logId, String level) {
+    if (!filterEnabled) {
+      return true;
+    }
+    
+    LogLevelFilter logFilter = findLogFilter(logId);
+    List<String> allowedLevels = getAllowedLevels(hostName, logFilter);
+    return allowedLevels.isEmpty() || allowedLevels.contains(level);
+  }
+
+  private static synchronized LogLevelFilter findLogFilter(String logId) {
+    LogLevelFilter logFilter = filters.get(logId);
+    if (logFilter != null) {
+      return logFilter;
+    }
+
+    LOG.info("Filter is not present for log " + logId + ", creating default filter");
+    LogLevelFilter defaultFilter = new LogLevelFilter();
+    defaultFilter.setLabel(logId);
+    defaultFilter.setDefaultLevels(defaultLogLevels);
+
+    try {
+      config.createLogLevelFilter(clusterName, logId, defaultFilter);
+      filters.put(logId, defaultFilter);
+    } catch (Exception e) {
+      LOG.warn("Could not persist the default filter for log " + logId, e);
+    }
+
+    return defaultFilter;
+  }
+
+  private static List<String> getAllowedLevels(String hostName, LogLevelFilter componentFilter) {
+    String componentName = componentFilter.getLabel();
+    List<String> hosts = componentFilter.getHosts();
+    List<String> defaultLevels = componentFilter.getDefaultLevels();
+    List<String> overrideLevels = componentFilter.getOverrideLevels();
+    Date expiryTime = componentFilter.getExpiryTime();
+
+    // check whether the user has overridden the filter
+    if (expiryTime != null || CollectionUtils.isNotEmpty(overrideLevels) || CollectionUtils.isNotEmpty(hosts)) {
+      if (CollectionUtils.isEmpty(hosts)) { // an empty or null hosts list means the filter applies to all hosts
+        hosts = Arrays.asList(LogFeederConstants.ALL); // guard: hosts may be null, so reassign instead of add
+      }
+
+      if (hosts.isEmpty() || hosts.contains(hostName)) {
+        if (isFilterExpired(componentFilter)) {
+          LOG.debug("Filter for component " + componentName + " and host " + hostName + " expired at " +
+              componentFilter.getExpiryTime());
+          return defaultLevels;
+        } else {
+          return overrideLevels;
+        }
+      }
+    }
+    return defaultLevels;
+  }
+
+  private static boolean isFilterExpired(LogLevelFilter logLevelFilter) {
+    if (logLevelFilter == null)
+      return false;
+
+    Date filterEndDate = logLevelFilter.getExpiryTime();
+    if (filterEndDate == null) {
+      return false;
+    }
+
+    Date currentDate = new Date();
+    if (!currentDate.before(filterEndDate)) {
+      LOG.debug("Filter for component " + logLevelFilter.getLabel() + " and hosts [" +
+          StringUtils.join(logLevelFilter.getHosts(), ',') + "] expired: filter endTime " +
+          formatter.get().format(filterEndDate) + " is older than currentTime " + formatter.get().format(currentDate));
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
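
Usage sketch (not part of this patch): the handler answers true when filtering is disabled, when the resolved level list is empty, or when the entry's level is in the list; the first lookup of an unknown logId persists a default filter through LogSearchConfig. The LogSearchConfig instance below is assumed to be created elsewhere.

    // Assumed setup: 'logSearchConfig' was initialized by the caller.
    LogLevelFilterHandler.init(logSearchConfig);
    boolean keep = LogLevelFilterHandler.isAllowed("host1", "hdfs_datanode", "INFO");
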
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
index 96709c0..5facf76 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/Mapper.java
@@ -21,12 +21,14 @@
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+
 public abstract class Mapper {
   private String inputDesc;
   protected String fieldName;
   private String mapClassCode;
 
-  public abstract boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs);
+  public abstract boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor);
 
   protected void init(String inputDesc, String fieldName, String mapClassCode) {
     this.inputDesc = inputDesc;
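
The signature change above replaces the untyped Object mapConfigs with a MapFieldDescriptor, so each concrete mapper casts to its specific descriptor and reads typed getters instead of probing a raw Map by string key. A representative line of the new pattern (taken from the MapperFieldCopy change below):

    // Typed access replaces (Map<String, Object>) casts and string keys:
    String copyName = ((MapFieldCopyDescriptor) mapFieldDescriptor).getCopyName();
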
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
index 6a7fad7..5d34c06 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperDate.java
@@ -26,6 +26,8 @@
 
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
 import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
@@ -39,18 +41,11 @@
   private SimpleDateFormat srcDateFormatter = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName() +
-        ", map=" + this);
-      return false;
-    }
     
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    String targetDateFormat = (String) mapObjects.get("target_date_pattern");
-    String srcDateFormat = (String) mapObjects.get("src_date_pattern");
+    String targetDateFormat = ((MapDateDescriptor)mapFieldDescriptor).getTargetDatePattern();
+    String srcDateFormat = ((MapDateDescriptor)mapFieldDescriptor).getSourceDatePattern();
     if (StringUtils.isEmpty(targetDateFormat)) {
       LOG.fatal("Date format for map is empty. " + this);
     } else {
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
index 39e1ff4..a463f49 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopy.java
@@ -21,6 +21,8 @@
 
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 
@@ -33,16 +35,9 @@
   private String copyName = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
-    
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    copyName = (String) mapObjects.get("copy_name");
+    copyName = ((MapFieldCopyDescriptor)mapFieldDescriptor).getCopyName();
     if (StringUtils.isEmpty(copyName)) {
       LOG.fatal("Map copy name is empty.");
       return false;
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
index 9b6e83c..3f160da 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldName.java
@@ -22,6 +22,8 @@
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -35,16 +37,10 @@
   private String newValue = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
-    
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    newValue = (String) mapObjects.get("new_fieldname");
+
+    newValue = ((MapFieldNameDescriptor)mapFieldDescriptor).getNewFieldName();
     if (StringUtils.isEmpty(newValue)) {
       LOG.fatal("Map field value is empty.");
       return false;
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
index 87cda65..03ff95b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/mapper/MapperFieldValue.java
@@ -22,6 +22,8 @@
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -36,17 +38,11 @@
   private String newValue = null;
 
   @Override
-  public boolean init(String inputDesc, String fieldName, String mapClassCode, Object mapConfigs) {
+  public boolean init(String inputDesc, String fieldName, String mapClassCode, MapFieldDescriptor mapFieldDescriptor) {
     init(inputDesc, fieldName, mapClassCode);
-    if (!(mapConfigs instanceof Map)) {
-      LOG.fatal("Can't initialize object. mapConfigs class is not of type Map. " + mapConfigs.getClass().getName());
-      return false;
-    }
     
-    @SuppressWarnings("unchecked")
-    Map<String, Object> mapObjects = (Map<String, Object>) mapConfigs;
-    prevValue = (String) mapObjects.get("pre_value");
-    newValue = (String) mapObjects.get("post_value");
+    prevValue = ((MapFieldValueDescriptor)mapFieldDescriptor).getPreValue();
+    newValue = ((MapFieldValueDescriptor)mapFieldDescriptor).getPostValue();
     if (StringUtils.isEmpty(newValue)) {
       LOG.fatal("Map field value is empty.");
       return false;
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
index 2d1bf40..39526a5 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
@@ -89,6 +89,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
+  @Override
   protected boolean emitMetrics(TimelineMetrics metrics) {
     return super.emitMetrics(metrics);
   }
@@ -103,4 +113,4 @@
     return collectorPort;
   }
 
-}
\ No newline at end of file
+}
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
index bc6a553..65b9e19 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/Output.java
@@ -28,11 +28,8 @@
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.log4j.Logger;
 
 public abstract class Output extends ConfigBlock {
-  private static final Logger LOG = Logger.getLogger(Output.class);
-
   private String destination = null;
 
   protected MetricData writeBytesMetric = new MetricData(getWriteBytesMetricName(), false);
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
index fcf2695..8308a4f 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputLineFilter.java
@@ -41,7 +41,7 @@
   public Boolean apply(Map<String, Object> lineMap, Input input) {
     boolean isLogFilteredOut = false;
     LRUCache inputLruCache = input.getCache();
-    if (inputLruCache != null && "service".equals(input.getConfigs().get(LogFeederConstants.ROW_TYPE))) {
+    if (inputLruCache != null && "service".equals(input.getInputDescriptor().getRowtype())) {
       String logMessage = (String) lineMap.get(input.getCacheKeyField());
       Long timestamp = null;
       if (lineMap.containsKey((LogFeederConstants.IN_MEMORY_TIMESTAMP))) {
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
index 3c80e50..4d6c43b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
@@ -21,7 +21,6 @@
 
 import java.io.File;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
@@ -30,10 +29,8 @@
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
-import org.apache.ambari.logfeeder.input.cache.LRUCache;
-import org.apache.ambari.logfeeder.logconfig.FilterLogData;
+import org.apache.ambari.logfeeder.loglevelfilter.FilterLogData;
 import org.apache.ambari.logfeeder.metrics.MetricData;
-import org.apache.ambari.logfeeder.util.DateUtil;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.MurmurHash;
 import org.apache.commons.lang3.StringUtils;
@@ -63,10 +60,6 @@
     this.outputs.add(output);
   }
 
-  public void retainUsedOutputs(Collection<Output> usedOutputs) {
-    outputs.retainAll(usedOutputs);
-  }
-
   public void init() throws Exception {
     for (Output output : outputs) {
       output.init();
@@ -77,8 +70,8 @@
     Input input = inputMarker.input;
 
     // Update the block with the context fields
-    for (Map.Entry<String, String> entry : input.getContextFields().entrySet()) {
-      if (jsonObj.get(entry.getKey()) == null) {
+    for (Map.Entry<String, String> entry : input.getInputDescriptor().getAddFields().entrySet()) {
+      if (jsonObj.get(entry.getKey()) == null || entry.getKey().equals("cluster") && "null".equals(jsonObj.get(entry.getKey()))) {
         jsonObj.put(entry.getKey(), entry.getValue());
       }
     }
@@ -86,13 +79,13 @@
     // TODO: Ideally most of the overrides should be configurable
 
     if (jsonObj.get("type") == null) {
-      jsonObj.put("type", input.getStringValue("type"));
+      jsonObj.put("type", input.getInputDescriptor().getType());
     }
     if (jsonObj.get("path") == null && input.getFilePath() != null) {
       jsonObj.put("path", input.getFilePath());
     }
-    if (jsonObj.get("path") == null && input.getStringValue("path") != null) {
-      jsonObj.put("path", input.getStringValue("path"));
+    if (jsonObj.get("path") == null && input.getInputDescriptor().getPath() != null) {
+      jsonObj.put("path", input.getInputDescriptor().getPath());
     }
     if (jsonObj.get("host") == null && LogFeederUtil.hostName != null) {
       jsonObj.put("host", LogFeederUtil.hostName);
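
A reading note on the add-fields condition above: && binds tighter than ||, so a field is overwritten when it is missing, or when it is the "cluster" field holding the literal string "null". The same condition, parenthesized for clarity (behavior identical):

    if (jsonObj.get(entry.getKey()) == null
        || (entry.getKey().equals("cluster") && "null".equals(jsonObj.get(entry.getKey())))) {
      jsonObj.put(entry.getKey(), entry.getValue());
    }
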
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
index 26f1ddb..076d12d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputS3File.java
@@ -19,9 +19,6 @@
 package org.apache.ambari.logfeeder.output;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import org.apache.ambari.logfeeder.LogFeeder;
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.input.InputMarker;
@@ -31,11 +28,18 @@
 import org.apache.ambari.logfeeder.output.spool.RolloverHandler;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.S3Util;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigGson;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputConfigImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputS3FileDescriptorImpl;
 import org.apache.log4j.Logger;
 
 import java.io.File;
-import java.util.*;
-import java.util.Map.Entry;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
 
 
 /**
@@ -50,7 +54,6 @@
 public class OutputS3File extends Output implements RolloverCondition, RolloverHandler {
   private static final Logger LOG = Logger.getLogger(OutputS3File.class);
 
-  public static final String INPUT_ATTRIBUTE_TYPE = "type";
   public static final String GLOBAL_CONFIG_S3_PATH_SUFFIX = "global.config.json";
 
   private LogSpooler logSpooler;
@@ -72,9 +75,9 @@
    */
   @Override
   public void copyFile(File inputFile, InputMarker inputMarker) {
-    String type = inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE);
+    String type = inputMarker.input.getInputDescriptor().getType();
     S3Uploader s3Uploader = new S3Uploader(s3OutputConfiguration, false, type);
-    String resolvedPath = s3Uploader.uploadFile(inputFile, inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE));
+    String resolvedPath = s3Uploader.uploadFile(inputFile, inputMarker.input.getInputDescriptor().getType());
 
     uploadConfig(inputMarker, type, s3OutputConfiguration, resolvedPath);
   }
@@ -82,43 +85,43 @@
   private void uploadConfig(InputMarker inputMarker, String type, S3OutputConfiguration s3OutputConfiguration,
       String resolvedPath) {
 
-    ArrayList<Map<String, Object>> filters = new ArrayList<>();
+    ArrayList<FilterDescriptor> filters = new ArrayList<>();
     addFilters(filters, inputMarker.input.getFirstFilter());
-    Map<String, Object> inputConfig = new HashMap<>();
-    inputConfig.putAll(inputMarker.input.getConfigs());
+    InputS3FileDescriptor inputS3FileDescriptorOriginal = (InputS3FileDescriptor) inputMarker.input.getInputDescriptor();
+    InputS3FileDescriptorImpl inputS3FileDescriptor = InputConfigGson.gson.fromJson(
+        InputConfigGson.gson.toJson(inputS3FileDescriptorOriginal), InputS3FileDescriptorImpl.class);
     String s3CompletePath = LogFeederConstants.S3_PATH_START_WITH + s3OutputConfiguration.getS3BucketName() +
         LogFeederConstants.S3_PATH_SEPARATOR + resolvedPath;
-    inputConfig.put("path", s3CompletePath);
+    inputS3FileDescriptor.setPath(s3CompletePath);
 
-    ArrayList<Map<String, Object>> inputConfigList = new ArrayList<>();
-    inputConfigList.add(inputConfig);
+    ArrayList<InputDescriptorImpl> inputConfigList = new ArrayList<>();
+    inputConfigList.add(inputS3FileDescriptor);
     // set source s3_file
-    // remove global config from filter config
-    removeGlobalConfig(inputConfigList);
-    removeGlobalConfig(filters);
+    // remove global config from input config
+    removeS3GlobalConfig(inputS3FileDescriptor);
     // write config into s3 file
-    Map<String, Object> config = new HashMap<>();
-    config.put("filter", filters);
-    config.put("input", inputConfigList);
-    writeConfigToS3(config, getComponentConfigFileName(type), s3OutputConfiguration);
+    InputConfigImpl inputConfig = new InputConfigImpl();
+    inputConfig.setInput(inputConfigList);
+    
+    writeConfigToS3(inputConfig, getComponentConfigFileName(type), s3OutputConfiguration);
     // write global config
     writeGlobalConfig(s3OutputConfiguration);
   }
 
-  private void addFilters(ArrayList<Map<String, Object>> filters, Filter filter) {
+  private void addFilters(ArrayList<FilterDescriptor> filters, Filter filter) {
     if (filter != null) {
-      Map<String, Object> filterConfig = new HashMap<String, Object>();
-      filterConfig.putAll(filter.getConfigs());
-      filters.add(filterConfig);
+      FilterDescriptor filterDescriptorOriginal = filter.getFilterDescriptor();
+      FilterDescriptor filterDescriptor = InputConfigGson.gson.fromJson(
+          InputConfigGson.gson.toJson(filterDescriptorOriginal), filterDescriptorOriginal.getClass());
+      filters.add(filterDescriptor);
       if (filter.getNextFilter() != null) {
         addFilters(filters, filter.getNextFilter());
       }
     }
   }
 
-  private void writeConfigToS3(Map<String, Object> configToWrite, String s3KeySuffix, S3OutputConfiguration s3OutputConfiguration) {
-    Gson gson = new GsonBuilder().setPrettyPrinting().create();
-    String configJson = gson.toJson(configToWrite);
+  private void writeConfigToS3(Object config, String s3KeySuffix, S3OutputConfiguration s3OutputConfiguration) {
+    String configJson = InputConfigGson.gson.toJson(config);
 
     String s3ResolvedKey = new S3LogPathResolver().getResolvedPath(getStringValue("s3_config_dir"), s3KeySuffix,
         s3OutputConfiguration.getCluster());
@@ -131,31 +134,14 @@
     return "input.config-" + componentName + ".json";
   }
 
-
-  private Map<String, Object> getGlobalConfig() {
-    Map<String, Object> globalConfig = LogFeeder.globalConfigs;
-    if (globalConfig == null) {
-      globalConfig = new HashMap<>();
-    }
-    return globalConfig;
-  }
-
-  private void removeGlobalConfig(List<Map<String, Object>> configList) {
-    Map<String, Object> globalConfig = getGlobalConfig();
-    if (configList != null && globalConfig != null) {
-      for (Entry<String, Object> globalConfigEntry : globalConfig.entrySet()) {
-        if (globalConfigEntry != null) {
-          String globalKey = globalConfigEntry.getKey();
-          if (globalKey != null && !globalKey.trim().isEmpty()) {
-            for (Map<String, Object> config : configList) {
-              if (config != null) {
-                config.remove(globalKey);
-              }
-            }
-          }
-        }
-      }
-    }
+  private void removeS3GlobalConfig(InputS3FileDescriptorImpl inputS3FileDescriptor) {
+    inputS3FileDescriptor.setSource(null);
+    inputS3FileDescriptor.setCopyFile(null);
+    inputS3FileDescriptor.setProcessFile(null);
+    inputS3FileDescriptor.setTail(null);
+    inputS3FileDescriptor.getAddFields().remove("ip");
+    inputS3FileDescriptor.getAddFields().remove("host");
+    inputS3FileDescriptor.getAddFields().remove("bundle_id");
   }
 
   /**
@@ -164,7 +150,7 @@
   @SuppressWarnings("unchecked")
   private synchronized void writeGlobalConfig(S3OutputConfiguration s3OutputConfiguration) {
     if (!uploadedGlobalConfig) {
-      Map<String, Object> globalConfig = LogFeederUtil.cloneObject(getGlobalConfig());
+      Map<String, Object> globalConfig = new HashMap<>();
       //updating global config before write to s3
       globalConfig.put("source", "s3_file");
       globalConfig.put("copy_file", false);
@@ -205,7 +191,7 @@
   public void write(String block, InputMarker inputMarker) throws Exception {
     if (logSpooler == null) {
       logSpooler = createSpooler(inputMarker.input.getFilePath());
-      s3Uploader = createUploader(inputMarker.input.getStringValue(INPUT_ATTRIBUTE_TYPE));
+      s3Uploader = createUploader(inputMarker.input.getInputDescriptor().getType());
     }
     logSpooler.add(block);
   }
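
The toJson/fromJson round-trip introduced in uploadConfig and addFilters is a deliberate deep copy: it clones the descriptor so the S3-bound copy can be mutated (path rewritten, global fields stripped) without touching the live input configuration. The idiom in isolation, with 'original' standing in for any descriptor instance:

    // Deep-copy a descriptor via Gson serialization; mutating 'copy'
    // leaves 'original' untouched.
    InputS3FileDescriptorImpl copy = InputConfigGson.gson.fromJson(
        InputConfigGson.gson.toJson(original), InputS3FileDescriptorImpl.class);
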
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
index 73cf449..d8a1fbb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
@@ -27,14 +27,11 @@
 import java.net.UnknownHostException;
 import java.util.HashMap;
 import java.util.Hashtable;
-import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
 import org.apache.ambari.logfeeder.LogFeeder;
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.metrics.MetricData;
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -80,6 +77,9 @@
   }
   
   private static Properties props;
+  public static Properties getProperties() {
+    return props;
+  }
 
   /**
    * This method will read the properties from System, followed by propFile and finally from the map
@@ -203,55 +203,6 @@
     return retValue;
   }
 
-  @SuppressWarnings("unchecked")
-  public static boolean isEnabled(Map<String, Object> conditionConfigs, Map<String, Object> valueConfigs) {
-    Map<String, Object> conditions = (Map<String, Object>) conditionConfigs.get("conditions");
-    if (MapUtils.isEmpty(conditions)) {
-      return toBoolean((String) valueConfigs.get("is_enabled"), true);
-    }
-    
-    for (String conditionType : conditions.keySet()) {
-      if (!conditionType.equalsIgnoreCase("fields")) {
-        continue;
-      }
-      
-      Map<String, Object> fields = (Map<String, Object>) conditions.get("fields");
-      for (Map.Entry<String, Object> field : fields.entrySet()) {
-        if (field.getValue() instanceof String) {
-          if (isFieldConditionMatch(valueConfigs, field.getKey(), (String) field.getValue())) {
-            return true;
-          }
-        } else {
-          for (String stringValue : (List<String>) field.getValue()) {
-            if (isFieldConditionMatch(valueConfigs, field.getKey(), stringValue)) {
-              return true;
-            }
-          }
-        }
-      }
-    }
-    
-    return false;
-  }
-
-  private static boolean isFieldConditionMatch(Map<String, Object> configs, String fieldName, String stringValue) {
-    boolean allow = false;
-    String fieldValue = (String) configs.get(fieldName);
-    if (fieldValue != null && fieldValue.equalsIgnoreCase(stringValue)) {
-      allow = true;
-    } else {
-      @SuppressWarnings("unchecked")
-      Map<String, Object> addFields = (Map<String, Object>) configs.get("add_fields");
-      if (addFields != null && addFields.get(fieldName) != null) {
-        String addFieldValue = (String) addFields.get(fieldName);
-        if (stringValue.equalsIgnoreCase(addFieldValue)) {
-          allow = true;
-        }
-      }
-    }
-    return allow;
-  }
-
   public static void logStatForMetric(MetricData metric, String prefixStr, String postFix) {
     long currStat = metric.value;
     long currMS = System.currentTimeMillis();
@@ -308,24 +259,6 @@
       return false;
     }
   }
-
-  public static boolean isListContains(List<String> list, String str, boolean caseSensitive) {
-    if (list == null) {
-      return false;
-    }
-    
-    for (String value : list) {
-      if (value == null) {
-        continue;
-      }
-      
-      if (caseSensitive ? value.equals(str) : value.equalsIgnoreCase(str) ||
-          value.equalsIgnoreCase(LogFeederConstants.ALL)) {
-        return true;
-      }
-    }
-    return false;
-  }
   
   private static String logfeederTempDir = null;
   
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/SSLUtil.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/SSLUtil.java
index 80b34e0..d963de3 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/SSLUtil.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/SSLUtil.java
@@ -26,6 +26,7 @@
 import org.apache.log4j.Logger;
 
 import java.io.File;
+import java.nio.charset.Charset;
 
 public class SSLUtil {
   private static final Logger LOG = Logger.getLogger(SSLUtil.class);
@@ -120,10 +121,10 @@
     try {
       File pwdFile = new File(LOGFEEDER_CERT_DEFAULT_FOLDER, fileName);
       if (!pwdFile.exists()) {
-        FileUtils.writeStringToFile(pwdFile, LOGFEEDER_STORE_DEFAULT_PASSWORD);
+        FileUtils.writeStringToFile(pwdFile, LOGFEEDER_STORE_DEFAULT_PASSWORD, Charset.defaultCharset());
         return LOGFEEDER_STORE_DEFAULT_PASSWORD;
       } else {
-        return FileUtils.readFileToString(pwdFile);
+        return FileUtils.readFileToString(pwdFile, Charset.defaultCharset());
       }
     } catch (Exception e) {
       LOG.warn("Exception occurred during read/write password file for keystore/truststore.", e);
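
The Charset-explicit overloads above replace commons-io calls that were deprecated for depending on the platform default encoding. The pattern in isolation ('password' stands in for the value being stored; an explicit UTF-8 would be the usual choice when the file must be portable across hosts):

    // Explicit charset avoids the deprecated default-encoding overloads.
    FileUtils.writeStringToFile(pwdFile, password, Charset.defaultCharset());
    String stored = FileUtils.readFileToString(pwdFile, Charset.defaultCharset());
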
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
index 7ef967c..8a3d26d 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
@@ -38,9 +38,9 @@
   <appender name="rolling_file_json"
     class="org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender">
     <param name="file" value="logs/logsearch-logfeeder.json" />
-		<param name="append" value="true" />
-		<param name="maxFileSize" value="10MB" />
-		<param name="maxBackupIndex" value="10" />
+    <param name="append" value="true" />
+    <param name="maxFileSize" value="10MB" />
+    <param name="maxBackupIndex" value="10" />
     <layout class="org.apache.ambari.logsearch.appender.LogsearchConversion" />
   </appender> 
  
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
index 99565c5..8d7e86c 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterGrokTest.java
@@ -18,12 +18,13 @@
 
 package org.apache.ambari.logfeeder.filter;
 
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterGrokDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
@@ -43,12 +44,12 @@
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> config) throws Exception {
+  public void init(FilterGrokDescriptor filterGrokDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterGrok = new FilterGrok();
-    filterGrok.loadConfig(config);
+    filterGrok.loadConfig(filterGrokDescriptor);
     filterGrok.setOutputManager(mockOutputManager);
     filterGrok.setInput(EasyMock.mock(Input.class));
     filterGrok.init();
@@ -58,10 +59,10 @@
   public void testFilterGrok_parseMessage() throws Exception {
     LOG.info("testFilterGrok_parseMessage()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -84,10 +85,10 @@
   public void testFilterGrok_parseMultiLineMessage() throws Exception {
     LOG.info("testFilterGrok_parseMultiLineMessage()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -114,10 +115,10 @@
   public void testFilterGrok_notMatchingMesagePattern() throws Exception {
     LOG.info("testFilterGrok_notMatchingMesagePattern()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("message_pattern", "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMessagePattern("(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}");
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall().anyTimes();
@@ -134,9 +135,9 @@
   public void testFilterGrok_noMesagePattern() throws Exception {
     LOG.info("testFilterGrok_noMesagePattern()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("multiline_pattern", "^(%{TIMESTAMP_ISO8601:logtime})");
-    init(config);
+    FilterGrokDescriptorImpl filterGrokDescriptor = new FilterGrokDescriptorImpl();
+    filterGrokDescriptor.setMultilinePattern("^(%{TIMESTAMP_ISO8601:logtime})");
+    init(filterGrokDescriptor);
 
     EasyMock.replay(mockOutputManager);
 
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
index 643dafc..8f75c3a 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterJSONTest.java
@@ -21,7 +21,6 @@
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -29,6 +28,7 @@
 import org.apache.ambari.logfeeder.common.LogfeederException;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterJsonDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
 import org.easymock.CaptureType;
@@ -47,12 +47,12 @@
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> params) throws Exception {
+  public void init(FilterJsonDescriptorImpl filterJsonDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterJson = new FilterJSON();
-    filterJson.loadConfig(params);
+    filterJson.loadConfig(filterJsonDescriptor);
     filterJson.setOutputManager(mockOutputManager);
     filterJson.init();
   }
@@ -61,7 +61,7 @@
   public void testJSONFilterCode_convertFields() throws Exception {
     LOG.info("testJSONFilterCode_convertFields()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -86,7 +86,7 @@
   public void testJSONFilterCode_logTimeOnly() throws Exception {
     LOG.info("testJSONFilterCode_logTimeOnly()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -111,7 +111,7 @@
   public void testJSONFilterCode_lineNumberOnly() throws Exception {
     LOG.info("testJSONFilterCode_lineNumberOnly()");
 
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -131,7 +131,7 @@
   @Test
   public void testJSONFilterCode_invalidJson() throws Exception {
     LOG.info("testJSONFilterCode_invalidJson()");
-    init(new HashMap<String, Object>());
+    init(new FilterJsonDescriptorImpl());
     String inputStr = "invalid json";
     try {
       filterJson.apply(inputStr, new InputMarker(null, null, 0));
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
index 05647e6..ae978fb 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/filter/FilterKeyValueTest.java
@@ -18,10 +18,11 @@
 
 package org.apache.ambari.logfeeder.filter;
 
-import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.output.OutputManager;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.FilterKeyValueDescriptorImpl;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.log4j.Logger;
 import org.easymock.Capture;
@@ -41,12 +42,12 @@
   private OutputManager mockOutputManager;
   private Capture<Map<String, Object>> capture;
 
-  public void init(Map<String, Object> config) throws Exception {
+  public void init(FilterKeyValueDescriptor filterKeyValueDescriptor) throws Exception {
     mockOutputManager = EasyMock.strictMock(OutputManager.class);
     capture = EasyMock.newCapture(CaptureType.LAST);
 
     filterKeyValue = new FilterKeyValue();
-    filterKeyValue.loadConfig(config);
+    filterKeyValue.loadConfig(filterKeyValueDescriptor);
     filterKeyValue.setOutputManager(mockOutputManager);
     filterKeyValue.init();
   }
@@ -55,11 +56,10 @@
   public void testFilterKeyValue_extraction() throws Exception {
     LOG.info("testFilterKeyValue_extraction()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    // using default value split:
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -80,11 +80,11 @@
   public void testFilterKeyValue_extractionWithBorders() throws Exception {
     LOG.info("testFilterKeyValue_extractionWithBorders()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    config.put("value_borders", "()");
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    filterKeyValueDescriptor.setValueBorders("()");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall();
@@ -105,10 +105,9 @@
   public void testFilterKeyValue_missingSourceField() throws Exception {
     LOG.info("testFilterKeyValue_missingSourceField()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("field_split", "&");
-    // using default value split: =
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
     EasyMock.expectLastCall().anyTimes();
@@ -124,10 +123,10 @@
   public void testFilterKeyValue_noSourceFieldPresent() throws Exception {
     LOG.info("testFilterKeyValue_noSourceFieldPresent()");
 
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source_field", "keyValueField");
-    config.put("field_split", "&");
-    init(config);
+    FilterKeyValueDescriptorImpl filterKeyValueDescriptor = new FilterKeyValueDescriptorImpl();
+    filterKeyValueDescriptor.setSourceField("keyValueField");
+    filterKeyValueDescriptor.setFieldSplit("&");
+    init(filterKeyValueDescriptor);
 
     // using default value split: =
     mockOutputManager.write(EasyMock.capture(capture), EasyMock.anyObject(InputMarker.class));
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
index 08aa564..3a5f31e 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputFileTest.java
@@ -20,13 +20,13 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.ambari.logfeeder.filter.Filter;
 import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputFileDescriptorImpl;
 import org.apache.commons.io.FileUtils;
 import org.apache.log4j.Logger;
 import org.easymock.EasyMock;
@@ -77,15 +77,14 @@
   }
 
   public void init(String path) throws Exception {
-    Map<String, Object> config = new HashMap<String, Object>();
-    config.put("source", "file");
-    config.put("tail", "true");
-    config.put("gen_event_md5", "true");
-    config.put("start_position", "beginning");
-
-    config.put("type", "hdfs_datanode");
-    config.put("rowtype", "service");
-    config.put("path", path);
+    InputFileDescriptorImpl inputFileDescriptor = new InputFileDescriptorImpl();
+    inputFileDescriptor.setSource("file");
+    inputFileDescriptor.setTail(true);
+    inputFileDescriptor.setGenEventMd5(true);
+    inputFileDescriptor.setStartPosition("beginning");
+    inputFileDescriptor.setType("hdfs_datanode");
+    inputFileDescriptor.setRowtype("service");
+    inputFileDescriptor.setPath(path);
 
     Filter capture = new Filter() {
       @Override
@@ -103,7 +102,7 @@
     };
 
     inputFile = new InputFile();
-    inputFile.loadConfig(config);
+    inputFile.loadConfig(inputFileDescriptor);
     inputFile.addFilter(capture);
     inputFile.init();
   }
@@ -180,7 +179,7 @@
 
   private File createFile(String filename) throws IOException {
     File newFile = new File(FileUtils.getTempDirectoryPath() + TEST_DIR_NAME + filename);
-    FileUtils.writeStringToFile(newFile, TEST_LOG_FILE_CONTENT);
+    FileUtils.writeStringToFile(newFile, TEST_LOG_FILE_CONTENT, Charset.defaultCharset());
     return newFile;
   }
 
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputManagerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputManagerTest.java
index 368a930..e9bbe7e 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputManagerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/input/InputManagerTest.java
@@ -42,53 +42,31 @@
     replay(input1, input2, input3, input4);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.removeInput(input3);
     manager.removeInput(input4);
     
     verify(input1, input2, input3, input4);
     
-    List<Input> inputList = manager.getInputList();
+    List<Input> inputList = manager.getInputList("serviceName");
     assertEquals(inputList.size(), 2);
     assertEquals(inputList.get(0), input1);
     assertEquals(inputList.get(1), input2);
   }
 
   @Test
-  public void testInputManager_init() throws Exception {
-    Input input1 = strictMock(Input.class);
-    Input input2 = strictMock(Input.class);
-    Input input3 = strictMock(Input.class);
-    
-    input1.init(); expectLastCall();
-    input2.init(); expectLastCall();
-    input3.init(); expectLastCall();
-    
-    expect(input1.isTail()).andReturn(false);
-    expect(input2.isTail()).andReturn(false);
-    expect(input3.isTail()).andReturn(false);
-    
-    replay(input1, input2, input3);
-    
-    InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
-    
-    manager.init();
-    
-    verify(input1, input2, input3);
-  }
-
-  @Test
   public void testInputManager_monitor() throws Exception {
     Input input1 = strictMock(Input.class);
     Input input2 = strictMock(Input.class);
     Input input3 = strictMock(Input.class);
     
+    input1.init(); expectLastCall();
+    input2.init(); expectLastCall();
+    input3.init(); expectLastCall();
+    
     expect(input1.isReady()).andReturn(true);
     expect(input2.isReady()).andReturn(true);
     expect(input3.isReady()).andReturn(false);
@@ -101,11 +79,11 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
-    manager.monitor();
+    manager.startInputs("serviceName");
     
     verify(input1, input2, input3);
   }
@@ -130,9 +108,9 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.addMetricsContainers(metrics);
     
@@ -156,9 +134,9 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.logStats();
     
@@ -182,9 +160,9 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.waitOnAllInputs();
     
@@ -204,9 +182,9 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.checkInAll();
     
@@ -230,9 +208,9 @@
     replay(input1, input2, input3);
     
     InputManager manager = new InputManager();
-    manager.add(input1);
-    manager.add(input2);
-    manager.add(input3);
+    manager.add("serviceName", input1);
+    manager.add("serviceName", input2);
+    manager.add("serviceName", input3);
     
     manager.close();
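
The test updates reflect InputManager's new service-keyed API: inputs are registered and started per service name, and startInputs replaces the old monitor call. A condensed sketch of the flow these tests exercise:

    InputManager manager = new InputManager();
    manager.add("serviceName", input1);
    manager.startInputs("serviceName");             // was: manager.monitor()
    List<Input> inputs = manager.getInputList("serviceName");
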
     
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
index 266108f..4123dad 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
@@ -18,85 +18,82 @@
 
 package org.apache.ambari.logfeeder.logconfig;
 
-import java.lang.reflect.Field;
+import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.Date;
 
 import static org.easymock.EasyMock.*;
 import static org.junit.Assert.*;
 
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logfeeder.loglevelfilter.FilterLogData;
+import org.apache.ambari.logfeeder.loglevelfilter.LogLevelFilterHandler;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.junit.AfterClass;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.commons.lang.time.DateUtils;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class LogConfigHandlerTest {
   
-  private static LogConfigFetcher mockFetcher;
-  
   private static InputMarker inputMarkerAudit;
   private static InputMarker inputMarkerService;
   static {
-    Map<String, Object> auditMap = new HashMap<String, Object>();
-    auditMap.put(LogFeederConstants.ROW_TYPE, "audit");
+    InputDescriptorImpl auditInputDescriptor = new InputDescriptorImpl() {};
+    auditInputDescriptor.setRowtype("audit");
+    
     Input auditInput = strictMock(Input.class);
-    expect(auditInput.getConfigs()).andReturn(auditMap).anyTimes();
+    expect(auditInput.getInputDescriptor()).andReturn(auditInputDescriptor).anyTimes();
     inputMarkerAudit = new InputMarker(auditInput, null, 0);
     
-    Map<String, Object> serviceMap = new HashMap<String, Object>();
-    serviceMap.put(LogFeederConstants.ROW_TYPE, "service");
+    InputDescriptorImpl serviceInputDescriptor = new InputDescriptorImpl() {};
+    serviceInputDescriptor.setRowtype("service");
+    
     Input serviceInput = strictMock(Input.class);
-    expect(serviceInput.getConfigs()).andReturn(serviceMap).anyTimes();
+    expect(serviceInput.getInputDescriptor()).andReturn(serviceInputDescriptor).anyTimes();
     inputMarkerService = new InputMarker(serviceInput, null, 0);
     
     replay(auditInput, serviceInput);
   }
   
-  private static final Map<String, Object> CONFIG_MAP = new HashMap<>();
-  static {
-    CONFIG_MAP.put("jsons",
-        "{'filter':{" +
-          "'configured_log_file':{" +
-            "'label':'configured_log_file'," +
-            "'hosts':[]," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':[]}," +
-          "'configured_log_file2':{" +
-            "'label':'configured_log_file2'," +
-            "'hosts':['host1']," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':['FATAL','ERROR','WARN','INFO','DEBUG','TRACE']," +
-            "'expiryTime':'3000-01-01T00:00:00.000Z'}," +
-          "'configured_log_file3':{" +
-            "'label':'configured_log_file3'," +
-            "'hosts':['host1']," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':['FATAL','ERROR','WARN','INFO','DEBUG','TRACE']," +
-            "'expiryTime':'1000-01-01T00:00:00.000Z'}" +
-          "}}");
-  }
-  
   @BeforeClass
   public static void init() throws Exception {
-    mockFetcher = strictMock(LogConfigFetcher.class);
-    Field f = LogConfigFetcher.class.getDeclaredField("instance");
-    f.setAccessible(true);
-    f.set(null, mockFetcher);
-    expect(mockFetcher.getConfigDoc()).andReturn(CONFIG_MAP).anyTimes();
-    replay(mockFetcher);
-    
     LogFeederUtil.loadProperties("logfeeder.properties", null);
-    LogConfigHandler.handleConfig();
-    Thread.sleep(1000);
+    
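+    // the handler persists filters through LogSearchConfig, so allow any number of createLogLevelFilter calls on the mock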
+    LogSearchConfig config = strictMock(LogSearchConfig.class);
+    config.createLogLevelFilter(anyString(), anyString(), anyObject(LogLevelFilter.class));
+    expectLastCall().anyTimes();
+    LogLevelFilterHandler.init(config);
+    
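+    // filter1: default levels only, no host restriction and no overrides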
+    LogLevelFilter logLevelFilter1 = new LogLevelFilter();
+    logLevelFilter1.setHosts(Collections.<String> emptyList());
+    logLevelFilter1.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter1.setOverrideLevels(Collections.<String> emptyList());
+    
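+    // filter2: overrides the levels on host1; expiry is a day ahead, so the override is active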
+    LogLevelFilter logLevelFilter2 = new LogLevelFilter();
+    logLevelFilter2.setHosts(Arrays.asList("host1"));
+    logLevelFilter2.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter2.setOverrideLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"));
+    logLevelFilter2.setExpiryTime(DateUtils.addDays(new Date(), 1));
+    
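+    // filter3: same override on host1, but already expired, so the defaults apply again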
+    LogLevelFilter logLevelFilter3 = new LogLevelFilter();
+    logLevelFilter3.setHosts(Arrays.asList("host1"));
+    logLevelFilter3.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter3.setOverrideLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"));
+    logLevelFilter3.setExpiryTime(DateUtils.addDays(new Date(), -1));
+    
+    LogLevelFilterHandler h = new LogLevelFilterHandler();
+    h.setLogLevelFilter("configured_log_file1", logLevelFilter1);
+    h.setLogLevelFilter("configured_log_file2", logLevelFilter2);
+    h.setLogLevelFilter("configured_log_file3", logLevelFilter3);
   }
   
   @Test
   public void testLogConfigHandler_auditAllowed() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'DEBUG'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'DEBUG'}",
         inputMarkerAudit));
   }
   
@@ -109,19 +106,25 @@
   
   @Test
   public void testLogConfigHandler_notConfiguredLogAllowed() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file', 'level':'INFO'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file1', 'level':'WARN'}",
+        inputMarkerService));
+  }
+  
+  @Test
+  public void testLogConfigHandler_notConfiguredLogNotAllowed() throws Exception {
+    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file1', 'level':'TRACE'}",
         inputMarkerService));
   }
   
   @Test
   public void testLogConfigHandler_configuredDataAllow() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'INFO'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'INFO'}",
         inputMarkerService));
   }
   
   @Test
   public void testLogConfigHandler_configuredDataDontAllow() throws Exception {
-    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'DEBUG'}",
+    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'DEBUG'}",
         inputMarkerService));
   }
   
@@ -142,9 +145,4 @@
     assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file3', 'level':'DEBUG'}",
         inputMarkerService));
   }
-  
-  @AfterClass
-  public static void finish() {
-    verify(mockFetcher);
-  }
 }
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
index 8beecda..0a0a9fd 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperDateTest.java
@@ -25,6 +25,7 @@
 import java.util.Map;
 
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapDateDescriptorImpl;
 import org.apache.commons.lang3.time.DateUtils;
 import org.apache.log4j.Logger;
 import org.junit.Test;
@@ -40,11 +41,11 @@
   public void testMapperDate_epoch() {
     LOG.info("testMapperDate_epoch()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "epoch");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("epoch");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 
@@ -61,11 +62,11 @@
   public void testMapperDate_pattern() throws Exception {
     LOG.info("testMapperDate_pattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("yyyy-MM-dd HH:mm:ss.SSS");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String dateString = "2016-04-08 15:55:23.548";
@@ -80,44 +81,35 @@
   }
 
   @Test
-  public void testMapperDate_configNotMap() {
-    LOG.info("testMapperDate_configNotMap()");
-
-    MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperDate_noDatePattern() {
     LOG.info("testMapperDate_noDatePattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("some_param", "some_value");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
 
     MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
   }
 
   @Test
   public void testMapperDate_notParsableDatePattern() {
     LOG.info("testMapperDate_notParsableDatePattern()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "not_parsable_content");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("not_parsable_content");
 
     MapperDate mapperDate = new MapperDate();
-    assertFalse("Was able to initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
   }
 
   @Test
   public void testMapperDate_invalidEpochValue() {
     LOG.info("testMapperDate_invalidEpochValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "epoch");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("epoch");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String invalidValue = "abc";
@@ -131,11 +123,11 @@
   public void testMapperDate_invalidDateStringValue() {
     LOG.info("testMapperDate_invalidDateStringValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("target_date_pattern", "yyyy-MM-dd HH:mm:ss.SSS");
+    MapDateDescriptorImpl mapDateDescriptor = new MapDateDescriptorImpl();
+    mapDateDescriptor.setTargetDatePattern("yyyy-MM-dd HH:mm:ss.SSS");
 
     MapperDate mapperDate = new MapperDate();
-    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperDate.init(null, "someField", null, mapDateDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     String invalidValue = "abc";
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
index 108c96e..4899dfc 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldCopyTest.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldCopyDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,11 +36,11 @@
   public void testMapperFieldCopy_copyField() {
     LOG.info("testMapperFieldCopy_copyField()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("copy_name", "someOtherField");
+    MapFieldCopyDescriptorImpl mapFieldCopyDescriptor = new MapFieldCopyDescriptorImpl();
+    mapFieldCopyDescriptor.setCopyName("someOtherField");
 
     MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertTrue("Could not initialize!", mapperFieldCopy.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldCopy.init(null, "someField", null, mapFieldCopyDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     jsonObj.put("someField", "someValue");
@@ -52,20 +53,12 @@
   }
 
   @Test
-  public void testMapperFielCopy_configNotMap() {
-    LOG.info("testMapperFieldCopy_configNotMap()");
-
-    MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertFalse("Was able to initialize!", mapperFieldCopy.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldCopy_noNewFieldName() {
     LOG.info("testMapperFieldCopy_noNewFieldName()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldCopyDescriptorImpl mapFieldCopyDescriptor = new MapFieldCopyDescriptorImpl();
 
     MapperFieldCopy mapperFieldCopy = new MapperFieldCopy();
-    assertFalse("Was able to initialize!", mapperFieldCopy.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperFieldCopy.init(null, "someField", null, mapFieldCopyDescriptor));
   }
 }
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
index 8ecaad1..74b88fc 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldNameTest.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldNameDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,11 +36,11 @@
   public void testMapperFieldName_replaceField() {
     LOG.info("testMapperFieldName_replaceField()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("new_fieldname", "someOtherField");
+    MapFieldNameDescriptorImpl mapFieldNameDescriptor = new MapFieldNameDescriptorImpl();
+    mapFieldNameDescriptor.setNewFieldName("someOtherField");
 
     MapperFieldName mapperFieldName = new MapperFieldName();
-    assertTrue("Could not initialize!", mapperFieldName.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldName.init(null, "someField", null, mapFieldNameDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
     jsonObj.put("someField", "someValue");
@@ -52,20 +53,12 @@
   }
 
   @Test
-  public void testMapperFieldName_configNotMap() {
-    LOG.info("testMapperFieldName_configNotMap()");
-
-    MapperFieldName mapperFieldName = new MapperFieldName();
-    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldName_noNewFieldName() {
     LOG.info("testMapperFieldName_noNewFieldName()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldNameDescriptorImpl mapFieldNameDescriptor = new MapFieldNameDescriptorImpl();
 
     MapperFieldName mapperFieldName = new MapperFieldName();
-    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, mapConfigs));
+    assertFalse("Was able to initialize!", mapperFieldName.init(null, "someField", null, mapFieldNameDescriptor));
   }
 }
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
index fce4308..1a33740 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/mapper/MapperFieldValueTest.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.MapFieldValueDescriptorImpl;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
@@ -35,12 +36,12 @@
   public void testMapperFieldValue_replaceValue() {
     LOG.info("testMapperFieldValue_replaceValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("pre_value", "someValue");
-    mapConfigs.put("post_value", "someOtherValue");
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
+    mapFieldValueDescriptor.setPreValue("someValue");
+    mapFieldValueDescriptor.setPostValue("someOtherValue");
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 
@@ -52,33 +53,25 @@
   }
 
   @Test
-  public void testMapperFieldValue_configNotMap() {
-    LOG.info("testMapperFieldValue_configNotMap()");
-
-    MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertFalse("Was able to initialize!", mapperFieldValue.init(null, "someField", null, ""));
-  }
-
-  @Test
   public void testMapperFieldValue_noPostValue() {
     LOG.info("testMapperFieldValue_noPostValue()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertFalse("Was able to initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertFalse("Was not able to initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
   }
 
   @Test
   public void testMapperFieldValue_noPreValueFound() {
     LOG.info("testMapperFieldValue_noPreValueFound()");
 
-    Map<String, Object> mapConfigs = new HashMap<>();
-    mapConfigs.put("pre_value", "someValue");
-    mapConfigs.put("post_value", "someOtherValue");
+    MapFieldValueDescriptorImpl mapFieldValueDescriptor = new MapFieldValueDescriptorImpl();
+    mapFieldValueDescriptor.setPreValue("someValue");
+    mapFieldValueDescriptor.setPostValue("someOtherValue");
 
     MapperFieldValue mapperFieldValue = new MapperFieldValue();
-    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapConfigs));
+    assertTrue("Could not initialize!", mapperFieldValue.init(null, "someField", null, mapFieldValueDescriptor));
 
     Map<String, Object> jsonObj = new HashMap<>();
 
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
index 1ccc319..6e108ab 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputLineFilterTest.java
@@ -21,6 +21,8 @@
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.cache.LRUCache;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.easymock.EasyMock;
 import org.junit.Before;
 import org.junit.Test;
@@ -49,7 +51,7 @@
   public void testApplyWithFilterOutByDedupInterval() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, false));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -63,7 +65,7 @@
   public void testApplyDoNotFilterOutDataByDedupInterval() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 10L, false));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -77,7 +79,7 @@
   public void testApplyWithFilterOutByDedupLast() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 10L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -91,7 +93,7 @@
   public void testApplyDoNotFilterOutDataByDedupLast() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache("myMessage2", 10L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     // WHEN
@@ -117,7 +119,7 @@
   public void testApplyWithoutInMemoryTimestamp() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     Map<String, Object> lineMap = generateLineMap();
@@ -133,7 +135,7 @@
   public void testApplyWithoutLogMessage() {
     // GIVEN
     EasyMock.expect(inputMock.getCache()).andReturn(createLruCache(DEFAULT_DUMMY_MESSAGE, 100L, true));
-    EasyMock.expect(inputMock.getConfigs()).andReturn(generateInputConfigs());
+    EasyMock.expect(inputMock.getInputDescriptor()).andReturn(generateInputDescriptor());
     EasyMock.expect(inputMock.getCacheKeyField()).andReturn(CACHE_KEY_FIELD);
     EasyMock.replay(inputMock);
     Map<String, Object> lineMap = generateLineMap();
@@ -152,10 +154,10 @@
     return lineMap;
   }
 
-  private Map<String, Object> generateInputConfigs() {
-    Map<String, Object> inputConfigs = new HashMap<>();
-    inputConfigs.put(LogFeederConstants.ROW_TYPE, "service");
-    return inputConfigs;
+  private InputDescriptor generateInputDescriptor() {
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setRowtype("service");
+    return inputDescriptor;
   }
 
   private LRUCache createLruCache(String defaultKey, long defaultValue, boolean lastDedupEanabled) {
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
index 0a0a195..5abb720 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputManagerTest.java
@@ -32,30 +32,28 @@
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.metrics.MetricData;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.Test;
 
 public class OutputManagerTest {
 
   @Test
-  public void testOutputManager_addAndRemoveOutputs() {
+  public void testOutputManager_addOutputs() {
     Output output1 = strictMock(Output.class);
     Output output2 = strictMock(Output.class);
     Output output3 = strictMock(Output.class);
-    Output output4 = strictMock(Output.class);
     
-    replay(output1, output2, output3, output4);
+    replay(output1, output2, output3);
     
     OutputManager manager = new OutputManager();
     manager.add(output1);
     manager.add(output2);
     manager.add(output3);
     
-    manager.retainUsedOutputs(Arrays.asList(output1, output2, output4));
-    
-    verify(output1, output2, output3, output4);
+    verify(output1, output2, output3);
     
     List<Output> outputs = manager.getOutputs();
-    assertEquals(outputs.size(), 2);
+    assertEquals(outputs.size(), 3);
     assertEquals(outputs.get(0), output1);
     assertEquals(outputs.get(1), output2);
   }
@@ -94,15 +92,17 @@
     
     Input mockInput = strictMock(Input.class);
     InputMarker inputMarker = new InputMarker(mockInput, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setAddFields(Collections.<String, String> emptyMap());
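+    // setAddFields() stands in for the context fields the old getContextFields() call returned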
     
     Output output1 = strictMock(Output.class);
     Output output2 = strictMock(Output.class);
     Output output3 = strictMock(Output.class);
     
-    expect(mockInput.getContextFields()).andReturn(Collections.<String, String> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.isUseEventMD5()).andReturn(false);
     expect(mockInput.isGenEventMD5()).andReturn(false);
-    expect(mockInput.getConfigs()).andReturn(Collections.<String, Object> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.getCache()).andReturn(null);
     expect(mockInput.getOutputList()).andReturn(Arrays.asList(output1, output2, output3));
 
@@ -128,12 +128,13 @@
     
     Input mockInput = strictMock(Input.class);
     InputMarker inputMarker = new InputMarker(mockInput, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
     
     Output output1 = strictMock(Output.class);
     Output output2 = strictMock(Output.class);
     Output output3 = strictMock(Output.class);
     
-    expect(mockInput.getConfigs()).andReturn(Collections.<String, Object> emptyMap());
+    expect(mockInput.getInputDescriptor()).andReturn(inputDescriptor);
     expect(mockInput.getOutputList()).andReturn(Arrays.asList(output1, output2, output3));
     
     output1.write(jsonString, inputMarker); expectLastCall();
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
index 1872135..7c6aca2 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/output/OutputS3FileTest.java
@@ -22,6 +22,7 @@
 import org.apache.ambari.logfeeder.input.InputMarker;
 import org.apache.ambari.logfeeder.output.spool.LogSpooler;
 import org.apache.ambari.logfeeder.output.spool.LogSpoolerContext;
+import org.apache.ambari.logsearch.config.zookeeper.model.inputconfig.impl.InputDescriptorImpl;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -33,7 +34,6 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-
 public class OutputS3FileTest {
 
   private Map<String, Object> configMap;
@@ -71,8 +71,11 @@
 
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
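+    // the input type now comes from the typed descriptor instead of the old INPUT_ATTRIBUTE_TYPE string value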
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block");
     final S3Uploader s3Uploader = mock(S3Uploader.class);
@@ -99,8 +102,11 @@
   public void shouldReuseSpoolerForSamePath() throws Exception {
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block1");
     spooler.add("log event block2");
@@ -169,8 +175,11 @@
   public void shouldUploadFileOnRollover() throws Exception {
     Input input = mock(Input.class);
     InputMarker inputMarker = new InputMarker(input, null, 0);
+    InputDescriptorImpl inputDescriptor = new InputDescriptorImpl() {};
+    inputDescriptor.setType("hdfs-namenode");
+    
     expect(input.getFilePath()).andReturn("/var/log/hdfs-namenode.log");
-    expect(input.getStringValue(OutputS3File.INPUT_ATTRIBUTE_TYPE)).andReturn("hdfs-namenode");
+    expect(input.getInputDescriptor()).andReturn(inputDescriptor);
     final LogSpooler spooler = mock(LogSpooler.class);
     spooler.add("log event block1");
     final S3Uploader s3Uploader = mock(S3Uploader.class);
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
index 59020cc..19027d1 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
@@ -17,4 +17,5 @@
 logfeeder.solr.config.interval=5
 logfeeder.solr.zk_connect_string=some_connect_string
 logfeeder.metrics.collector.hosts=some_collector_host
-node.hostname=test_host_name
\ No newline at end of file
+node.hostname=test_host_name
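+# log levels shipped by default when no filter is configured for a log file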
+logfeeder.include.default.level=FATAL,ERROR,WARN
\ No newline at end of file
diff --git a/ambari-logsearch/ambari-logsearch-server/pom.xml b/ambari-logsearch/ambari-logsearch-server/pom.xml
index 52bda8d..b505c12 100755
--- a/ambari-logsearch/ambari-logsearch-server/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-server/pom.xml
@@ -1,4 +1,3 @@
-<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -573,6 +572,18 @@
           <groupId>com.sun.jersey</groupId>
           <artifactId>jersey-server</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-framework</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-recipes</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -587,6 +598,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-logsearch-config-zookeeper</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
       <artifactId>ambari-metrics-common</artifactId>
       <version>${project.version}</version>
     </dependency>
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/LogSearch.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/LogSearch.java
index b75da0e..45281b6 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/LogSearch.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/LogSearch.java
@@ -82,6 +82,7 @@
   public void run(String[] argv) throws Exception {
     SSLUtil.ensureStorePasswords();
     SSLUtil.loadKeystore();
+    
     Server server = buildSever(argv);
     HandlerList handlers = new HandlerList();
     handlers.addHandler(createSwaggerContext());
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/MessageEnums.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/MessageEnums.java
index 4f1725f..9dd8b34 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/MessageEnums.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/MessageEnums.java
@@ -33,7 +33,8 @@
   ZNODE_NOT_READY("logsearch.zk.znode.error", "ZNode is not available."),
   ZK_CONFIG_NOT_READY("logsearch.zk.config.error", "Collection configuration has not been uploaded yet"),
   SOLR_COLLECTION_NOT_READY("logsearch.solr.collection.error", "Solr is not accessible yet for the collection."),
-
+  CONFIGURATION_NOT_AVAILABLE("logsearch.config.not_available", "Log Search configuration is not available"),
+  
   // Common Validations
   INVALID_PASSWORD("logsearch.validation.invalid_password", "Invalid password"),
   INVALID_INPUT_DATA("logsearch.validation.invalid_input_data", "Invalid input data"),
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/PropertiesHelper.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/PropertiesHelper.java
index 73a43ad..a2a7f5e 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/PropertiesHelper.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/common/PropertiesHelper.java
@@ -40,6 +40,9 @@
   private static final String LOGSEARCH_PROP_FILE="logsearch.properties";
   
   private static Map<String, String> propertiesMap;
+  public static Map<String, String> getProperties() {
+    return propertiesMap;
+  }
 
   private PropertiesHelper() {
   }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
index 2f9cba4..973dc4b 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
@@ -19,6 +19,8 @@
 package org.apache.ambari.logsearch.conf;
 
 import com.google.common.collect.Lists;
+
+import org.apache.ambari.logsearch.conf.global.LogSearchConfigState;
 import org.apache.ambari.logsearch.conf.global.SolrCollectionState;
 import org.apache.ambari.logsearch.web.authenticate.LogsearchAuthFailureHandler;
 import org.apache.ambari.logsearch.web.authenticate.LogsearchAuthSuccessHandler;
@@ -26,6 +28,7 @@
 import org.apache.ambari.logsearch.web.filters.LogsearchAuditLogsStateFilter;
 import org.apache.ambari.logsearch.web.filters.LogsearchAuthenticationEntryPoint;
 import org.apache.ambari.logsearch.web.filters.LogsearchCorsFilter;
+import org.apache.ambari.logsearch.web.filters.LogSearchConfigStateFilter;
 import org.apache.ambari.logsearch.web.filters.LogsearchKRBAuthenticationFilter;
 import org.apache.ambari.logsearch.web.filters.LogsearchJWTFilter;
 import org.apache.ambari.logsearch.web.filters.LogsearchSecurityContextFormationFilter;
@@ -82,6 +85,9 @@
   @Named("solrUserConfigState")
   private SolrCollectionState solrUserConfigState;
 
+  @Inject
+  private LogSearchConfigState logSearchConfigState;
+
   @Override
   protected void configure(HttpSecurity http) throws Exception {
     http
@@ -108,6 +114,7 @@
       .addFilterAfter(logsearchUserConfigFilter(), LogsearchSecurityContextFormationFilter.class)
       .addFilterAfter(logsearchAuditLogFilter(), LogsearchSecurityContextFormationFilter.class)
       .addFilterAfter(logsearchServiceLogFilter(), LogsearchSecurityContextFormationFilter.class)
+      .addFilterAfter(logSearchConfigStateFilter(), LogsearchSecurityContextFormationFilter.class)
       .addFilterBefore(corsFilter(), LogsearchSecurityContextFormationFilter.class)
       .addFilterBefore(logsearchJwtFilter(), LogsearchSecurityContextFormationFilter.class)
       .logout()
@@ -175,6 +182,10 @@
   public LogsearchUserConfigStateFilter logsearchUserConfigFilter() {
     return new LogsearchUserConfigStateFilter(userConfigRequestMatcher(), solrUserConfigState, solrUserPropsConfig);
   }
+  
+  public LogSearchConfigStateFilter logSearchConfigStateFilter() {
+    return new LogSearchConfigStateFilter(logsearchConfigRequestMatcher(), logSearchConfigState);
+  }
 
   @Bean
   public RequestMatcher requestMatcher() {
@@ -206,4 +217,8 @@
     return new AntPathRequestMatcher("/api/v1/userconfig/**");
   }
 
+  public RequestMatcher logsearchConfigRequestMatcher() {
+    return new AntPathRequestMatcher("/api/v1/shipper/**");
+  }
+
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/global/LogSearchConfigState.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/global/LogSearchConfigState.java
new file mode 100644
index 0000000..7ca701d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/global/LogSearchConfigState.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.conf.global;
+
+import javax.inject.Named;
+
+@Named
+public class LogSearchConfigState {
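+  // written by the async config setup thread and read by request filters, hence volatile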
+  private volatile boolean logSearchConfigAvailable;
+
+  public boolean isLogSearchConfigAvailable() {
+    return logSearchConfigAvailable;
+  }
+
+  public void setLogSearchConfigAvailable(boolean logSearchConfigAvailable) {
+    this.logSearchConfigAvailable = logSearchConfigAvailable;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/Configurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/Configurer.java
new file mode 100644
index 0000000..141299c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/Configurer.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.configurer;
+
+interface Configurer {
+  void start();
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogSearchConfigConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogSearchConfigConfigurer.java
new file mode 100644
index 0000000..978e91a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogSearchConfigConfigurer.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.configurer;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+
+import org.apache.ambari.logsearch.common.PropertiesHelper;
+import org.apache.ambari.logsearch.conf.global.LogSearchConfigState;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.LogSearchConfigFactory;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
+import org.apache.ambari.logsearch.config.zookeeper.LogSearchConfigZK;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Named
+public class LogSearchConfigConfigurer implements Configurer {
+  private static final Logger logger = LoggerFactory.getLogger(LogSearchConfigConfigurer.class);
+  
+  private static final int RETRY_INTERVAL_SECONDS = 10;
+  
+  private static LogSearchConfig logSearchConfig;
+  public static LogSearchConfig getConfig() {
+    return logSearchConfig;
+  }
+  
+  @Inject
+  private LogSearchConfigState logSearchConfigState;
+  
+  public void start() {
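+    // retry on a daemon thread until the ZooKeeper-backed config can be created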
+    Thread setupThread = new Thread("setup_logsearch_config") {
+      @Override
+      public void run() {
+        logger.info("Started thread to set up log search config");
+        while (true) {
+          try {
+            logSearchConfig = LogSearchConfigFactory.createLogSearchConfig(Component.SERVER, PropertiesHelper.getProperties(),
+                LogSearchConfigZK.class);
+            logSearchConfigState.setLogSearchConfigAvailable(true);
+            break;
+          } catch (Exception e) {
+            logger.warn("Could not initialize Log Search config, going to sleep for " + RETRY_INTERVAL_SECONDS + " seconds", e);
+            try { Thread.sleep(RETRY_INTERVAL_SECONDS * 1000); } catch (Exception e2) {/* ignore */}
+          }
+        }
+      }
+    };
+    setupThread.setDaemon(true);
+    setupThread.start();
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java
deleted file mode 100644
index 34e1bec..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.configurer;
-
-import org.apache.ambari.logsearch.conf.SolrPropsConfig;
-import org.apache.ambari.logsearch.conf.global.SolrCollectionState;
-import org.apache.ambari.logsearch.dao.UserConfigSolrDao;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LogfeederFilterConfigurer implements SolrConfigurer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LogfeederFilterConfigurer.class);
-
-  private static final int SETUP_RETRY_SECOND = 10;
-
-  private final UserConfigSolrDao userConfigSolrDao;
-
-  public LogfeederFilterConfigurer(final UserConfigSolrDao userConfigSolrDao) {
-    this.userConfigSolrDao = userConfigSolrDao;
-  }
-
-  @Override
-  public void start() {
-    final SolrPropsConfig solrPropsConfig = userConfigSolrDao.getSolrPropsConfig();
-    final SolrCollectionState state = userConfigSolrDao.getSolrCollectionState();
-    Thread setupFiltersThread = new Thread("logfeeder_filter_setup") {
-      @Override
-      public void run() {
-        LOG.info("logfeeder_filter_setup thread started (to upload logfeeder config)");
-        while (true) {
-          int retryCount = 0;
-          try {
-            retryCount++;
-            Thread.sleep(SETUP_RETRY_SECOND * 1000);
-            if (state.isSolrCollectionReady()) {
-              LOG.info("Tries to initialize logfeeder filters in '{}' collection", solrPropsConfig.getCollection());
-              userConfigSolrDao.getUserFilter();
-              break;
-            }
-          } catch (Exception e) {
-            LOG.error("Not able to save logfeeder filter while initialization, retryCount=" + retryCount, e);
-          }
-        }
-      }
-    };
-    setupFiltersThread.setDaemon(true);
-    setupFiltersThread.start();
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrAuditAliasConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrAuditAliasConfigurer.java
index c80a10d..1eca94b 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrAuditAliasConfigurer.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrAuditAliasConfigurer.java
@@ -36,7 +36,7 @@
 import java.util.Collection;
 import java.util.List;
 
-public class SolrAuditAliasConfigurer implements SolrConfigurer {
+public class SolrAuditAliasConfigurer implements Configurer {
 
   private static final Logger LOG = LoggerFactory.getLogger(SolrAuditAliasConfigurer.class);
 
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
index 7edc6aa..f2d022e 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrCollectionConfigurer.java
@@ -25,6 +25,7 @@
 import org.apache.ambari.logsearch.handler.CreateCollectionHandler;
 import org.apache.ambari.logsearch.handler.ListCollectionHandler;
 import org.apache.ambari.logsearch.handler.ReloadCollectionHandler;
+import org.apache.ambari.logsearch.handler.UpgradeSchemaHandler;
 import org.apache.ambari.logsearch.handler.UploadConfigurationHandler;
 import org.apache.commons.lang.StringUtils;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
@@ -44,7 +45,7 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-public class SolrCollectionConfigurer implements SolrConfigurer {
+public class SolrCollectionConfigurer implements Configurer {
 
   private Logger LOG = LoggerFactory.getLogger(SolrCollectionConfigurer.class);
 
@@ -53,9 +54,11 @@
   private static final int CONNECTION_TIMEOUT = 30000;
 
   private final SolrDaoBase solrDaoBase;
+  private final boolean hasEnumConfig; // whether an enumConfig.xml is shipped for the solr collection's config set
 
-  public SolrCollectionConfigurer(final SolrDaoBase solrDaoBase) {
+  public SolrCollectionConfigurer(final SolrDaoBase solrDaoBase, final boolean hasEnumConfig) {
     this.solrDaoBase = solrDaoBase;
+    this.hasEnumConfig = hasEnumConfig;
   }
 
   @Override
@@ -99,11 +102,12 @@
   }
 
   private boolean uploadConfigurationsIfNeeded(CloudSolrClient cloudSolrClient, File configSetFolder, SolrCollectionState state, SolrPropsConfig solrPropsConfig) throws Exception {
-    boolean reloadCollectionNeeded = new UploadConfigurationHandler(configSetFolder).handle(cloudSolrClient, solrPropsConfig);
+    boolean reloadCollectionNeeded = new UploadConfigurationHandler(configSetFolder, hasEnumConfig).handle(cloudSolrClient, solrPropsConfig);
     if (!state.isConfigurationUploaded()) {
       state.setConfigurationUploaded(true);
     }
-    return reloadCollectionNeeded;
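+    // the collection must also be reloaded when the schema upgrade changed anything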
+    boolean upgradeSchema = new UpgradeSchemaHandler(cloudSolrClient, configSetFolder).handle(cloudSolrClient, solrPropsConfig);
+    return reloadCollectionNeeded || upgradeSchema;
   }
 
   public boolean stopSetupCondition(SolrCollectionState state) {
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrConfigurer.java
deleted file mode 100644
index 67cb9d1..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/SolrConfigurer.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.configurer;
-
-interface SolrConfigurer {
-  void start();
-}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/AuditSolrDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/AuditSolrDao.java
index d058383..3eea08f 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/AuditSolrDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/AuditSolrDao.java
@@ -69,7 +69,7 @@
     String rangerAuditCollection = solrAuditLogPropsConfig.getRangerCollection();
 
     try {
-      new SolrCollectionConfigurer(this).start();
+      new SolrCollectionConfigurer(this, true).start();
       boolean createAlias = (aliasNameIn != null && StringUtils.isNotBlank(rangerAuditCollection));
       if (createAlias) {
         new SolrAuditAliasConfigurer(this).start();
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/ServiceLogsSolrDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/ServiceLogsSolrDao.java
index 37375dc..308ef1f 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/ServiceLogsSolrDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/ServiceLogsSolrDao.java
@@ -65,7 +65,7 @@
   public void postConstructor() {
     LOG.info("postConstructor() called.");
     try {
-      new SolrCollectionConfigurer(this).start();
+      new SolrCollectionConfigurer(this, true).start();
     } catch (Exception e) {
       LOG.error("error while connecting to Solr for service logs : solrUrl=" + solrServiceLogPropsConfig.getSolrUrl()
         + ", zkConnectString=" + solrServiceLogPropsConfig.getZkConnectString()
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
index 256ddae..a0e01a3 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
@@ -20,45 +20,25 @@
 package org.apache.ambari.logsearch.dao;
 
 import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeMap;
 
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
 import javax.inject.Named;
 
-import org.apache.ambari.logsearch.common.HadoopServiceConfigHelper;
-import org.apache.ambari.logsearch.common.LogSearchConstants;
 import org.apache.ambari.logsearch.common.LogSearchContext;
 import org.apache.ambari.logsearch.common.LogType;
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
 import org.apache.ambari.logsearch.conf.SolrUserPropsConfig;
 import org.apache.ambari.logsearch.conf.global.SolrCollectionState;
-import org.apache.ambari.logsearch.configurer.LogfeederFilterConfigurer;
 import org.apache.ambari.logsearch.configurer.SolrCollectionConfigurer;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
-import org.apache.ambari.logsearch.model.common.LogfeederFilterData;
-import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 
-import org.apache.ambari.logsearch.util.JSONUtil;
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.springframework.data.solr.core.SolrTemplate;
 
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.ID;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.USER_NAME;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.VALUES;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.FILTER_NAME;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.ROW_TYPE;
-
 @Named
 public class UserConfigSolrDao extends SolrDaoBase {
 
@@ -98,26 +78,13 @@
     String collection = solrUserConfig.getCollection();
 
     try {
-      new SolrCollectionConfigurer(this).start();
-      new LogfeederFilterConfigurer(this).start();
+      new SolrCollectionConfigurer(this, false).start();
     } catch (Exception e) {
       LOG.error("error while connecting to Solr for history logs : solrUrl=" + solrUrl + ", zkConnectString=" + zkConnectString +
           ", collection=" + collection, e);
     }
   }
 
-  public void saveUserFilter(LogFeederDataMap logfeederFilterWrapper) throws SolrException, SolrServerException, IOException {
-    String filterName = LogSearchConstants.LOGFEEDER_FILTER_NAME;
-    String json = JSONUtil.objToJson(logfeederFilterWrapper);
-    SolrInputDocument configDocument = new SolrInputDocument();
-    configDocument.addField(ID, logfeederFilterWrapper.getId());
-    configDocument.addField(ROW_TYPE, filterName);
-    configDocument.addField(VALUES, json);
-    configDocument.addField(USER_NAME, filterName);
-    configDocument.addField(FILTER_NAME, filterName);
-    addDocs(configDocument);
-  }
-
   public void deleteUserConfig(String id) throws SolrException, SolrServerException, IOException {
     removeDoc("id:" + id);
   }
@@ -138,52 +105,6 @@
     return updateResoponse;
   }
 
-  public LogFeederDataMap getUserFilter() throws SolrServerException, IOException {
-    SolrQuery solrQuery = new SolrQuery();
-    solrQuery.setQuery("*:*");
-    solrQuery.setFilterQueries(ROW_TYPE + ":" + LogSearchConstants.LOGFEEDER_FILTER_NAME);
-
-    QueryResponse response = process(solrQuery);
-    SolrDocumentList documentList = response.getResults();
-    LogFeederDataMap logfeederDataMap = null;
-    if (CollectionUtils.isNotEmpty(documentList)) {
-      SolrDocument configDoc = documentList.get(0);
-      String json = (String) configDoc.get(VALUES);
-      logfeederDataMap = (LogFeederDataMap) JSONUtil.jsonToObj(json, LogFeederDataMap.class);
-      logfeederDataMap.setId("" + configDoc.get(ID));
-    } else {
-      logfeederDataMap = new LogFeederDataMap();
-      logfeederDataMap.setFilter(new TreeMap<String, LogfeederFilterData>());
-      logfeederDataMap.setId(Long.toString(System.currentTimeMillis()));
-    }
-    
-    addMissingFilters(logfeederDataMap);
-    
-    return logfeederDataMap;
-  }
-
-  private void addMissingFilters(LogFeederDataMap logfeederDataMap) throws SolrServerException, IOException {
-    Set<String> logIds = HadoopServiceConfigHelper.getAllLogIds();
-    if (logIds != null) {
-      List<String> logfeederDefaultLevels = solrUserConfig.getLogLevels();
-      
-      boolean modified = false;
-      for (String logId : logIds) {
-        if (!logfeederDataMap.getFilter().containsKey(logId)) {
-          LogfeederFilterData logfeederFilterData = new LogfeederFilterData();
-          logfeederFilterData.setLabel(logId);
-          logfeederFilterData.setDefaultLevels(logfeederDefaultLevels);
-          logfeederDataMap.getFilter().put(logId, logfeederFilterData);
-          modified = true;
-        }
-      }
-      
-      if (modified) {
-        saveUserFilter(logfeederDataMap);
-      }
-    }
-  }
-
   @Override
   public SolrCollectionState getSolrCollectionState() {
     return solrUserConfigState;
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 984e834..885771d 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -48,6 +48,7 @@
   }
 
   public class AuditOperationDescriptions {
+    public static final String GET_AUDIT_CLUSTERS_OD = "Get all of the clusters for audit logs";
     public static final String GET_AUDIT_SCHEMA_FIELD_LIST_OD = "Get list of schema fields in audit collection";
     public static final String GET_AUDIT_LOGS_OD = "Get the list of logs details";
     public static final String PURGE_AUDIT_LOGS_OD = "Purge service logs based by criteria";
@@ -77,6 +78,7 @@
   }
 
   public class ServiceOperationDescriptions {
+    public static final String GET_SERVICE_CLUSTERS_OD = "Get all of the clusters for service logs";
     public static final String SEARCH_LOGS_OD = "Searching logs entry";
     public static final String PURGE_LOGS_OD = "Purge service logs based by criteria";
     public static final String GET_HOSTS_OD = "Get the list of service hosts currently active or having data in Solr";
@@ -111,11 +113,17 @@
     public static final String SAVE_USER_CONFIG_OD = "Save user config";
     public static final String DELETE_USER_CONFIG_OD = "Delete user config";
     public static final String GET_USER_CONFIG_OD = "Get user config";
-    public static final String GET_USER_FILTER_OD = "Get user filter";
-    public static final String UPDATE_USER_FILTER_OD = "Update user filter";
     public static final String GET_ALL_USER_NAMES_OD = "Get all user names";
   }
 
+  public class ShipperConfigOperationDescriptions {
+    public static final String GET_SERVICE_NAMES_OD = "Get service names";
+    public static final String GET_SHIPPER_CONFIG_OD = "Get shipper config";
+    public static final String SET_SHIPPER_CONFIG_OD = "Set shipper config";
+    public static final String GET_LOG_LEVEL_FILTER_OD = "Get log level filter";
+    public static final String UPDATE_LOG_LEVEL_FILTER_OD = "Update log level filter";
+  }
+
   public class StatusOperationDescriptions {
     public static final String STATUS_OD = "Get statuses for collections (not health state - show true if something already done)";
     public static final String SERVICE_LOGS_STATUS_OD = "Get statuses for service log collection (not health state - show true if something already done)";
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
new file mode 100644
index 0000000..6f3d8ca
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.handler;
+
+import org.apache.ambari.logsearch.conf.SolrPropsConfig;
+import org.apache.commons.io.FileUtils;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkConfigManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+import java.nio.file.Paths;
+import java.util.UUID;
+
+public abstract class AbstractSolrConfigHandler implements SolrZkRequestHandler<Boolean> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractSolrConfigHandler.class);
+
+  private File configSetFolder;
+
+  public AbstractSolrConfigHandler(File configSetFolder) {
+    this.configSetFolder = configSetFolder;
+  }
+
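+  /**
+   * Template method for keeping a Solr config set in sync: connects to ZooKeeper through the Solr
+   * client, then either refreshes an existing config set (doIfConfigExists) or creates a new one
+   * (doIfConfigNotExist), re-uploading any missing files. Returns true if the collection needs a reload.
+   */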
+  @Override
+  public Boolean handle(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) throws Exception {
+    boolean reloadCollectionNeeded = false;
+    String separator = FileSystems.getDefault().getSeparator();
+    String downloadFolderLocation = String.format("%s%s%s%s%s", System.getProperty("java.io.tmpdir"), separator,
+      UUID.randomUUID().toString(), separator, solrPropsConfig.getConfigName());
+    solrClient.connect();
+    SolrZkClient zkClient = solrClient.getZkStateReader().getZkClient();
+    File tmpDir = new File(downloadFolderLocation);
+    try {
+      ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
+      boolean configExists = zkConfigManager.configExists(solrPropsConfig.getConfigName());
+      if (configExists) {
+        uploadMissingConfigFiles(zkClient, zkConfigManager, solrPropsConfig.getConfigName());
+        reloadCollectionNeeded = doIfConfigExists(solrPropsConfig, zkClient, separator, downloadFolderLocation, tmpDir);
+      } else {
+        doIfConfigNotExist(solrPropsConfig, zkConfigManager);
+        uploadMissingConfigFiles(zkClient, zkConfigManager, solrPropsConfig.getConfigName());
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(String.format("Cannot upload configurations to zk. (collection: %s, config set folder: %s)",
+        solrPropsConfig.getCollection(), solrPropsConfig.getConfigSetFolder()), e);
+    } finally {
+      if (tmpDir.exists()) {
+        try {
+          FileUtils.deleteDirectory(tmpDir);
+        } catch (IOException e){
+          LOG.error("Cannot delete temp directory.", e);
+        }
+      }
+    }
+    return reloadCollectionNeeded;
+  }
+
+  /**
+   * Update a config file (e.g. solrconfig.xml) in the Solr znode on ZooKeeper. The download location of the
+   * current remote config is passed in so implementations can decide whether an update is actually needed.
+   */
+  public abstract boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file,
+                                               String separator, String downloadFolderLocation) throws IOException;
+
+  /**
+   * Name of the config file that should be uploaded to ZooKeeper.
+   */
+  public abstract String getConfigFileName();
+
+  public void doIfConfigNotExist(SolrPropsConfig solrPropsConfig, ZkConfigManager zkConfigManager) throws IOException {
+    // Do nothing
+  }
+
+  public void uploadMissingConfigFiles(SolrZkClient zkClient, ZkConfigManager zkConfigManager, String configName) throws IOException {
+    // Do nothing by default; subclasses may override.
+  }
+
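+  /**
+   * Downloads the current config set from ZooKeeper into the temp folder, then lets the concrete
+   * handler compare its tracked config file against the local copy; returns true if the collection
+   * should be reloaded because an update was pushed.
+   */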
+  public boolean doIfConfigExists(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, String separator, String downloadFolderLocation, File tmpDir) throws IOException {
+    boolean result = false;
+    LOG.info("Config set exists for '{}' collection. Refreshing it if needed...", solrPropsConfig.getCollection());
+    if (!tmpDir.mkdirs()) {
+      LOG.error("Cannot create directories for '{}'", tmpDir.getAbsolutePath());
+    }
+    ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
+    zkConfigManager.downloadConfigDir(solrPropsConfig.getConfigName(), Paths.get(downloadFolderLocation));
+    File[] listOfFiles = getConfigSetFolder().listFiles();
+    if (listOfFiles != null) {
+      for (File file : listOfFiles) {
+        if (file.getName().equals(getConfigFileName()) && updateConfigIfNeeded(solrPropsConfig, zkClient, file, separator, downloadFolderLocation)) {
+          result = true;
+          break;
+        }
+      }
+    }
+    return result;
+  }
+
+  protected File getConfigSetFolder() {
+    return configSetFolder;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java
new file mode 100644
index 0000000..0da2125
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UpgradeSchemaHandler.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.handler;
+
+import org.apache.ambari.logsearch.conf.SolrPropsConfig;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.configuration.HierarchicalConfiguration.Node;
+import org.apache.commons.configuration.XMLConfiguration;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.schema.SchemaRequest;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class UpgradeSchemaHandler extends AbstractSolrConfigHandler {
+
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeSchemaHandler.class);
+
+  private static final String SCHEMA_FILE = "managed-schema";
+  private static final String FIELD_NAME_PATH = "field[@name]";
+  private static final String FIELD_TYPE_NAME_PATH = "fieldType[@name]";
+  private static final String DYNAMIC_FIELD = "dynamicField";
+  private static final String DYNAMIC_FIELD_NAME_PATH = DYNAMIC_FIELD + "[@name]";
+
+  private CloudSolrClient cloudSolrClient;
+
+  private XMLConfiguration localFileXml;
+  private List<String> localDynamicFields;
+
+  public UpgradeSchemaHandler(CloudSolrClient cloudSolrClient, File configSetFolder) {
+    super(configSetFolder);
+    this.cloudSolrClient = cloudSolrClient;
+  }
+
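+  // If the local managed-schema declares fields the ZooKeeper copy lacks, push the new dynamic
+  // fields through the Schema API rather than re-uploading the whole config set.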
+  @Override
+  public boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file, String separator, String downloadFolderLocation) throws IOException {
+    boolean result = false;
+    if (localSchemaFileHasMoreFields(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
+      LOG.info("Solr schema file differs ('{}'), update config schema...", file.getName());
+      try {
+        upgradeDynamicFields();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      result = true;
+    }
+    return result;
+  }
+
+  // For now we only upgrade dynamic fields; later we can extend this feature if needed.
+  private void upgradeDynamicFields() throws IOException, SolrServerException {
+    if (localFileXml.getRoot() != null && CollectionUtils.isNotEmpty(localDynamicFields)) {
+      List<Node> children = localFileXml.getRoot().getChildren(DYNAMIC_FIELD);
+      for (Node dynamicFieldNode : children) {
+        List<Node> attributes = dynamicFieldNode.getAttributes();
+        Map<String, Object> attributesMap = new HashMap<>();
+        for (Node attribute : attributes) {
+          attributesMap.put(attribute.getName(), attribute.getValue());
+        }
+        if (attributesMap.get("name") != null && localDynamicFields.contains(attributesMap.get("name").toString())) {
+          SchemaRequest.AddDynamicField addDynamicFieldRequest = new SchemaRequest.AddDynamicField(attributesMap);
+          addDynamicFieldRequest.process(cloudSolrClient);
+          LOG.info("Added dynamic field request sent. (field name: {})", attributesMap.get("name"));
+        }
+      }
+    }
+  }
+
+  @Override
+  public String getConfigFileName() {
+    return SCHEMA_FILE;
+  }
+
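+  // Parses both schema XMLs and reports whether the local schema declares any field, field type
+  // or dynamic field that is missing from the downloaded one.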
+  private boolean localSchemaFileHasMoreFields(File localFile, File downloadedFile) {
+    try {
+      localFileXml = new XMLConfiguration(localFile);
+      XMLConfiguration downloadedFileXml = new XMLConfiguration(downloadedFile);
+
+      List<String> localFieldNames = (ArrayList<String>) localFileXml.getProperty(FIELD_NAME_PATH);
+      List<String> localFieldTypes = (ArrayList<String>) localFileXml.getProperty(FIELD_TYPE_NAME_PATH);
+      localDynamicFields = (ArrayList<String>) localFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
+
+      List<String> fieldNames = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_NAME_PATH);
+      List<String> fieldTypes = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_TYPE_NAME_PATH);
+      List<String> dynamicFields = (ArrayList<String>) downloadedFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
+
+      boolean fieldNameHasDiff = hasMoreFields(localFieldNames, fieldNames, FIELD_NAME_PATH);
+      boolean fieldTypeHasDiff = hasMoreFields(localFieldTypes, fieldTypes, FIELD_TYPE_NAME_PATH);
+      boolean dynamicFieldNameHasDiff = hasMoreFields(localDynamicFields, dynamicFields, DYNAMIC_FIELD_NAME_PATH);
+
+      return fieldNameHasDiff || fieldTypeHasDiff || dynamicFieldNameHasDiff;
+    } catch (Exception e) {
+      throw new RuntimeException("Exception during schema xml parsing.", e);
+    }
+  }
+
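+  // Note: removeAll() mutates localFields, so after this call the list holds only the entries
+  // that are missing from the downloaded schema (these are the ones logged below).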
+  private boolean hasMoreFields(List<String> localFields, List<String> fields, String tag) {
+    boolean result = false;
+    if (localFields != null) {
+      if (fields == null) {
+        result = true;
+      } else {
+        localFields.removeAll(fields);
+        if (!localFields.isEmpty()) {
+          result = true;
+        }
+      }
+    }
+    if (result) {
+      LOG.info("Found new fields or field types in local schema file.: {} ({})", localFields.toString(), tag);
+    }
+    return result;
+  }
+
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
index 27a6705..f48b7db 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/UploadConfigurationHandler.java
@@ -19,133 +19,99 @@
 package org.apache.ambari.logsearch.handler;
 
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
-import org.apache.commons.configuration.XMLConfiguration;
 import org.apache.commons.io.FileUtils;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.zookeeper.CreateMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.file.FileSystems;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
 
-public class UploadConfigurationHandler implements SolrZkRequestHandler<Boolean> {
+public class UploadConfigurationHandler extends AbstractSolrConfigHandler {
 
   private static final Logger LOG = LoggerFactory.getLogger(UploadConfigurationHandler.class);
 
-  private static final String SCHEMA_FILE = "managed-schema";
   private static final String SOLR_CONFIG_FILE = "solrconfig.xml";
-  private static final String FIELD_NAME_PATH = "field[@name]";
-  private static final String FIELD_TYPE_NAME_PATH = "fieldType[@name]";
-  private static final String DYNAMIC_FIELD_NAME_PATH = "dynamicField[@name]";
+  private static final String[] configFiles = {
+    "admin-extra.html", "admin-extra.menu-bottom.html", "admin-extra.menu-top.html",
+    "elevate.xml", "enumsConfig.xml", "managed-schema", "solrconfig.xml"
+  };
+  private boolean hasEnumConfig;
 
-  private File configSetFolder;
-
-  public UploadConfigurationHandler(File configSetFolder) {
-    this.configSetFolder = configSetFolder;
+  public UploadConfigurationHandler(File configSetFolder, boolean hasEnumConfig) {
+    super(configSetFolder);
+    this.hasEnumConfig = hasEnumConfig;
   }
 
   @Override
-  public Boolean handle(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) throws Exception {
-    boolean reloadCollectionNeeded = false;
-    String separator = FileSystems.getDefault().getSeparator();
-    String downloadFolderLocation = String.format("%s%s%s%s%s", System.getProperty("java.io.tmpdir"), separator,
-      UUID.randomUUID().toString(), separator, solrPropsConfig.getConfigName());
-    solrClient.connect();
-    SolrZkClient zkClient = solrClient.getZkStateReader().getZkClient();
-    File tmpDir = new File(downloadFolderLocation);
-    try {
-      ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
-      boolean configExists = zkConfigManager.configExists(solrPropsConfig.getConfigName());
-      if (configExists) {
-        LOG.info("Config set exists for '{}' collection. Refreshing it if needed...", solrPropsConfig.getCollection());
-        if (!tmpDir.mkdirs()) {
-          LOG.error("Cannot create directories for '{}'", tmpDir.getAbsolutePath());
-        }
-        zkConfigManager.downloadConfigDir(solrPropsConfig.getConfigName(), Paths.get(downloadFolderLocation));
-        File[] listOfFiles = configSetFolder.listFiles();
-        if (listOfFiles != null) {
-          for (File file : listOfFiles) {
-            if (file.getName().equals(SOLR_CONFIG_FILE) && !FileUtils.contentEquals(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
-              LOG.info("Solr config file differs ('{}'), upload config set to zookeeper", file.getName());
-              zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
-              reloadCollectionNeeded = true;
-              break;
-            }
-            if (file.getName().equals(SCHEMA_FILE) && localSchemaFileHasMoreFields(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
-              LOG.info("Solr schema file differs ('{}'), upload config set to zookeeper", file.getName());
-              zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
-              reloadCollectionNeeded = true;
-              break;
-            }
-          }
-        }
-      } else {
-        LOG.info("Config set does not exist for '{}' collection. Uploading it to zookeeper...", solrPropsConfig.getCollection());
-        File[] listOfFiles = configSetFolder.listFiles();
-        if (listOfFiles != null) {
-          zkConfigManager.uploadConfigDir(configSetFolder.toPath(), solrPropsConfig.getConfigName());
-        }
-      }
-    } catch (Exception e) {
-      throw new RuntimeException(String.format("Cannot upload configurations to zk. (collection: %s, config set folder: %s)",
-        solrPropsConfig.getCollection(), solrPropsConfig.getConfigSetFolder()), e);
-    } finally {
-      if (tmpDir.exists()) {
-        try {
-          FileUtils.deleteDirectory(tmpDir);
-        } catch (IOException e){
-          LOG.error("Cannot delete temp directory.", e);
-        }
-      }
-    }
-    return reloadCollectionNeeded;
-  }
-
-  private boolean localSchemaFileHasMoreFields(File localFile, File downloadedFile) {
-    try {
-      XMLConfiguration localFileXml = new XMLConfiguration(localFile);
-      XMLConfiguration downloadedFileXml = new XMLConfiguration(downloadedFile);
-
-      List<String> localFieldNames = (ArrayList<String>) localFileXml.getProperty(FIELD_NAME_PATH);
-      List<String> localFieldTypes = (ArrayList<String>) localFileXml.getProperty(FIELD_TYPE_NAME_PATH);
-      List<String> localDynamicFields = (ArrayList<String>) localFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
-
-      List<String> fieldNames = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_NAME_PATH);
-      List<String> fieldTypes = (ArrayList<String>) downloadedFileXml.getProperty(FIELD_TYPE_NAME_PATH);
-      List<String> dynamicFields = (ArrayList<String>) downloadedFileXml.getProperty(DYNAMIC_FIELD_NAME_PATH);
-
-      boolean fieldNameHasDiff = hasMoreFields(localFieldNames, fieldNames, FIELD_NAME_PATH);
-      boolean fieldTypeHasDiff = hasMoreFields(localFieldTypes, fieldTypes, FIELD_TYPE_NAME_PATH);
-      boolean dynamicFieldNameHasDiff = hasMoreFields(localDynamicFields, dynamicFields, DYNAMIC_FIELD_NAME_PATH);
-
-      return fieldNameHasDiff || fieldTypeHasDiff || dynamicFieldNameHasDiff;
-    } catch (Exception e) {
-      throw new RuntimeException("Exception during schema xml parsing.", e);
-    }
-  }
-
-  private boolean hasMoreFields(List<String> localFields, List<String> fields, String tag) {
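+  // Uploads the whole config set when the local solrconfig.xml differs from the ZooKeeper copy,
+  // then overwrites the solrconfig.xml znode directly with the local file's content.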
+  public boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file,
+                                      String separator, String downloadFolderLocation) throws IOException {
     boolean result = false;
-    if (localFields != null) {
-      if (fields == null) {
-        result = true;
-      } else {
-        localFields.removeAll(fields);
-        if (!localFields.isEmpty()) {
-          result = true;
-        }
-      }
-    }
-    if (result) {
-      LOG.info("Found new fields or field types in local schema file.: {} ({})", localFields.toString(), tag);
+    if (!FileUtils.contentEquals(file, new File(String.format("%s%s%s", downloadFolderLocation, separator, file.getName())))) {
+      LOG.info("Solr config file differs ('{}'), upload config set to zookeeper", file.getName());
+      ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
+      zkConfigManager.uploadConfigDir(getConfigSetFolder().toPath(), solrPropsConfig.getConfigName());
+      String filePath = String.format("%s%s%s", getConfigSetFolder(), separator, getConfigFileName());
+      String configsPath = String.format("/%s/%s/%s", "configs", solrPropsConfig.getConfigName(), getConfigFileName());
+      uploadFileToZk(zkClient, filePath, configsPath);
+      result = true;
     }
     return result;
   }
 
+  @Override
+  public void doIfConfigNotExist(SolrPropsConfig solrPropsConfig, ZkConfigManager zkConfigManager) throws IOException {
+    LOG.info("Config set does not exist for '{}' collection. Uploading it to zookeeper...", solrPropsConfig.getCollection());
+    File[] listOfFiles = getConfigSetFolder().listFiles();
+    if (listOfFiles != null) {
+      zkConfigManager.uploadConfigDir(getConfigSetFolder().toPath(), solrPropsConfig.getConfigName());
+    }
+  }
+
+  @Override
+  public String getConfigFileName() {
+    return SOLR_CONFIG_FILE;
+  }
+
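+  /**
+   * Ensures every expected config file exists under /configs/&lt;configName&gt; in ZooKeeper and
+   * re-uploads any that are missing; enumsConfig.xml is only checked for collections that use it.
+   */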
+  @Override
+  public void uploadMissingConfigFiles(SolrZkClient zkClient, ZkConfigManager zkConfigManager, String configName) throws IOException {
+    LOG.info("Check any of the configs files are missing for config ({})", configName);
+    for (String configFile : configFiles) {
+      if ("enumsConfig.xml".equals(configFile) && !hasEnumConfig) {
+        LOG.info("Config file ({}) is not needed for {}", configFile, configName);
+        continue;
+      }
+      String zkPath = String.format("%s/%s", configName, configFile);
+      if (zkConfigManager.configExists(zkPath)) {
+        LOG.info("Config file ({}) has already uploaded properly.", configFile);
+      } else {
+        LOG.info("Config file ({}) is missing. Reupload...", configFile);
+        FileSystems.getDefault().getSeparator();
+        uploadFileToZk(zkClient,
+          String.format("%s%s%s", getConfigSetFolder(), FileSystems.getDefault().getSeparator(), configFile),
+          String.format("%s%s", "/configs/", zkPath));
+      }
+    }
+  }
+
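+  // Writes a single config file to its znode, creating the node if it does not exist yet.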
+  private void uploadFileToZk(SolrZkClient zkClient, String filePath, String configsPath) throws FileNotFoundException {
+    InputStream is = new FileInputStream(filePath);
+    try {
+      if (zkClient.exists(configsPath, true)) {
+        zkClient.setData(configsPath, IOUtils.toByteArray(is), true);
+      } else {
+        zkClient.create(configsPath, IOUtils.toByteArray(is), CreateMode.PERSISTENT, true);
+      }
+    } catch (Exception e) {
+      throw new IllegalStateException(e);
+    } finally {
+      IOUtils.closeQuietly(is);
+    }
+  }
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
index 25e3271..99d2675 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/AuditLogsManager.java
@@ -71,6 +71,7 @@
 import org.springframework.data.solr.core.query.SimpleQuery;
 
 import static org.apache.ambari.logsearch.solr.SolrConstants.AuditLogConstants.AUDIT_COMPONENT;
+import static org.apache.ambari.logsearch.solr.SolrConstants.CommonLogConstants.CLUSTER;
 
 @Named
 public class AuditLogsManager extends ManagerBase<SolrAuditLogData, AuditLogResponse> {
@@ -221,4 +222,8 @@
     UpdateResponse updateResponse = auditSolrDao.deleteByQuery(solrQuery, "/audit/logs");
     return new StatusMessage(updateResponse.getStatus());
   }
+
+  public List<String> getClusters() {
+    return getClusters(auditSolrDao, CLUSTER, "/audit/logs/clusters");
+  }
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
index 6b40cb5..cddfc85 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ManagerBase.java
@@ -23,17 +23,25 @@
 import java.util.Collections;
 import java.util.List;
 
+import com.google.common.collect.Lists;
+import org.apache.ambari.logsearch.common.LogSearchConstants;
+import org.apache.ambari.logsearch.model.response.GroupListResponse;
 import org.apache.ambari.logsearch.model.response.LogData;
 import org.apache.ambari.logsearch.model.response.LogSearchResponse;
 import org.apache.ambari.logsearch.dao.SolrDaoBase;
+import org.apache.ambari.logsearch.util.SolrUtil;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.response.FacetField;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocumentList;
 import org.springframework.data.solr.core.DefaultQueryParser;
 import org.springframework.data.solr.core.query.SimpleQuery;
 import org.springframework.data.solr.core.query.SolrDataQuery;
 
+import static org.apache.ambari.logsearch.solr.SolrConstants.CommonLogConstants.CLUSTER;
+
 public abstract class ManagerBase<LOG_DATA_TYPE extends LogData, SEARCH_RESPONSE extends LogSearchResponse> extends JsonManagerBase {
   private static final Logger logger = Logger.getLogger(ManagerBase.class);
 
@@ -92,4 +100,26 @@
   protected abstract List<LOG_DATA_TYPE> convertToSolrBeans(QueryResponse response);
 
   protected abstract SEARCH_RESPONSE createLogSearchResponse();
+
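+  /**
+   * Returns the distinct cluster names known to the given collection by running a faceted *:* query
+   * on the cluster field.
+   */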
+  protected List<String> getClusters(SolrDaoBase solrDaoBase, String clusterField, String event) {
+    List<String> clusterResponse = Lists.newArrayList();
+    SolrQuery solrQuery = new SolrQuery();
+    solrQuery.setQuery("*:*");
+    SolrUtil.setFacetField(solrQuery, clusterField);
+    SolrUtil.setFacetSort(solrQuery, LogSearchConstants.FACET_INDEX);
+
+    QueryResponse response = solrDaoBase.process(solrQuery, event);
+    if (response == null) {
+      return clusterResponse;
+    }
+    List<FacetField> clusterFields = response.getFacetFields();
+    if (CollectionUtils.isNotEmpty(clusterFields)) {
+      FacetField clusterFacets = clusterFields.get(0);
+      for (FacetField.Count clusterCount : clusterFacets.getValues()) {
+        clusterResponse.add(clusterCount.getName());
+      }
+    }
+    return clusterResponse;
+  }
+
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
index 9ce209b..cb9e806 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ServiceLogsManager.java
@@ -72,6 +72,7 @@
 import org.apache.ambari.logsearch.converter.BaseServiceLogRequestQueryConverter;
 import org.apache.ambari.logsearch.converter.ServiceLogTruncatedRequestQueryConverter;
 import org.apache.ambari.logsearch.solr.ResponseDataGenerator;
+import org.apache.ambari.logsearch.solr.SolrConstants;
 import org.apache.ambari.logsearch.solr.model.SolrComponentTypeLogData;
 import org.apache.ambari.logsearch.solr.model.SolrHostLogData;
 import org.apache.ambari.logsearch.solr.model.SolrServiceLogData;
@@ -614,4 +615,8 @@
     UpdateResponse updateResponse = serviceLogsSolrDao.deleteByQuery(solrQuery, "/service/logs");
     return new StatusMessage(updateResponse.getStatus());
   }
+
+  public List<String> getClusters() {
+    return getClusters(serviceLogsSolrDao, CLUSTER, "/service/logs/clusters");
+  }
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
new file mode 100644
index 0000000..44d91a9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.manager;
+
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.configurer.LogSearchConfigConfigurer;
+import org.apache.ambari.logsearch.model.common.LSServerInputConfig;
+import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.ImmutableMap;
+
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+import javax.inject.Named;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+@Named
+public class ShipperConfigManager extends JsonManagerBase {
+
+  private static final Logger logger = Logger.getLogger(ShipperConfigManager.class);
+
+  @Inject
+  private LogSearchConfigConfigurer logSearchConfigConfigurer;
+
+  @PostConstruct
+  private void postConstructor() {
+    logSearchConfigConfigurer.start();
+  }
+  
+  public List<String> getServices(String clusterName) {
+    return LogSearchConfigConfigurer.getConfig().getServices(clusterName);
+  }
+
+  public LSServerInputConfig getInputConfig(String clusterName, String serviceName) {
+    InputConfig inputConfig = LogSearchConfigConfigurer.getConfig().getInputConfig(clusterName, serviceName);
+    return new LSServerInputConfig(inputConfig);
+  }
+
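+  // Fails with a JSON error entity if an input config already exists for the service; otherwise stores the new one.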
+  public Response createInputConfig(String clusterName, String serviceName, String inputConfig) {
+    
+    try {
+      if (LogSearchConfigConfigurer.getConfig().inputConfigExists(clusterName, serviceName)) {
+        return Response.serverError()
+            .type(MediaType.APPLICATION_JSON)
+            .entity(ImmutableMap.of("errorMessage", "Input config already exists for service " + serviceName))
+            .build();
+      }
+      
+      LogSearchConfigConfigurer.getConfig().createInputConfig(clusterName, serviceName, inputConfig);
+      return Response.ok().build();
+    } catch (Exception e) {
+      logger.warn("Could not create input config", e);
+      return Response.serverError().build();
+    }
+  }
+
+  public Response setInputConfig(String clusterName, String serviceName, String inputConfig) {
+    try {
+      if (!LogSearchConfigConfigurer.getConfig().inputConfigExists(clusterName, serviceName)) {
+        return Response.serverError()
+            .type(MediaType.APPLICATION_JSON)
+            .entity(ImmutableMap.of("errorMessage", "Input config doesn't exist for service " + serviceName))
+            .build();
+      }
+      
+      LogSearchConfigConfigurer.getConfig().setInputConfig(clusterName, serviceName, inputConfig);
+      return Response.ok().build();
+    } catch (Exception e) {
+      logger.warn("Could not update input config", e);
+      return Response.serverError().build();
+    }
+  }
+
+  public LSServerLogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return new LSServerLogLevelFilterMap(LogSearchConfigConfigurer.getConfig().getLogLevelFilters(clusterName));
+  }
+
+  public Response setLogLevelFilters(String clusterName, LSServerLogLevelFilterMap request) {
+    try {
+      LogSearchConfigConfigurer.getConfig().setLogLevelFilters(clusterName, request.convertToApi());
+      return Response.ok().build();
+    } catch (Exception e) {
+      logger.warn("Could not update log level filters", e);
+      return Response.serverError().build();
+    }
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
index a60fc5c..1df9f5a 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
@@ -27,7 +27,6 @@
 import org.apache.ambari.logsearch.common.LogSearchContext;
 import org.apache.ambari.logsearch.common.MessageEnums;
 import org.apache.ambari.logsearch.dao.UserConfigSolrDao;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
 import org.apache.ambari.logsearch.model.request.impl.UserConfigRequest;
 import org.apache.ambari.logsearch.model.response.UserConfigData;
 import org.apache.ambari.logsearch.model.response.UserConfigDataListResponse;
@@ -176,29 +175,6 @@
 
   }
 
-  // ////////////////////////////LEVEL FILTER/////////////////////////////////////
-
-  public LogFeederDataMap getUserFilter() {
-    LogFeederDataMap userFilter;
-    try {
-      userFilter = userConfigSolrDao.getUserFilter();
-    } catch (SolrServerException | IOException e) {
-      logger.error(e);
-      throw RESTErrorUtil.createRESTException(MessageEnums.SOLR_ERROR.getMessage().getMessage(), MessageEnums.ERROR_SYSTEM);
-    }
-    return userFilter;
-  }
-
-  public LogFeederDataMap saveUserFiter(LogFeederDataMap logfeederFilters) {
-    try {
-      userConfigSolrDao.saveUserFilter(logfeederFilters);
-    } catch (SolrException | SolrServerException | IOException e) {
-      logger.error("user config not able to save", e);
-      throw RESTErrorUtil.createRESTException(MessageEnums.SOLR_ERROR.getMessage().getMessage(), MessageEnums.ERROR_SYSTEM);
-    }
-    return getUserFilter();
-  }
-
   public List<String> getAllUserName() {
     List<String> userList = new ArrayList<String>();
     try {
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java
new file mode 100644
index 0000000..9cd9710
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerConditions.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Conditions;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerConditions {
+  private LSServerFields fields;
+  
+  public LSServerConditions(Conditions conditions) {
+    this.fields = new LSServerFields(conditions.getFields());
+  }
+
+  public LSServerFields getFields() {
+    return fields;
+  }
+
+  public void setFields(LSServerFields fields) {
+    this.fields = fields;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java
new file mode 100644
index 0000000..5f570da
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFields.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Set;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.Fields;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFields {
+  private Set<String> type;
+  
+  public LSServerFields(Fields fields) {
+    this.type = fields.getType();
+  }
+
+  public Set<String> getType() {
+    return type;
+  }
+
+  public void setType(Set<String> type) {
+    this.type = type;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java
new file mode 100644
index 0000000..0190c01
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilter.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonInclude(Include.NON_NULL)
+public abstract class LSServerFilter {
+  private String filter;
+  
+  private LSServerConditions conditions;
+  
+  @JsonProperty("sort_order")
+  private Integer sortOrder;
+  
+  private String sourceField;
+  
+  @JsonProperty("remove_source_field")
+  private Boolean removeSourceField;
+  
+  private Map<String, List<LSServerPostMapValues>> postMapValues;
+  
+  @JsonProperty("is_enabled")
+  private Boolean isEnabled;
+
+  public LSServerFilter(FilterDescriptor filterDescriptor) {
+    this.filter = filterDescriptor.getFilter();
+    this.conditions = new LSServerConditions(filterDescriptor.getConditions());
+    this.sortOrder = filterDescriptor.getSortOrder();
+    this.sourceField = filterDescriptor.getSourceField();
+    this.removeSourceField = filterDescriptor.isRemoveSourceField();
+    
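+    // Convert each API-level PostMapValues list into its server-side wrapper type.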
+    postMapValues = new HashMap<String, List<LSServerPostMapValues>>();
+    for (Map.Entry<String, ? extends List<? extends PostMapValues>> e : filterDescriptor.getPostMapValues().entrySet()) {
+      List<LSServerPostMapValues> lsServerPostMapValues = new ArrayList<>();
+      for (PostMapValues pmv : e.getValue()) {
+        lsServerPostMapValues.add(new LSServerPostMapValues(pmv));
+      }
+      postMapValues.put(e.getKey(), lsServerPostMapValues);
+    }
+    
+    this.isEnabled = filterDescriptor.isEnabled();
+  }
+
+  public String getFilter() {
+    return filter;
+  }
+
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  public LSServerConditions getConditions() {
+    return conditions;
+  }
+
+  public void setConditions(LSServerConditions conditions) {
+    this.conditions = conditions;
+  }
+
+  public Integer getSortOrder() {
+    return sortOrder;
+  }
+
+  public void setSortOrder(Integer sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  public String getSourceField() {
+    return sourceField;
+  }
+
+  public void setSourceField(String sourceField) {
+    this.sourceField = sourceField;
+  }
+
+  public Boolean getRemoveSourceField() {
+    return removeSourceField;
+  }
+
+  public void setRemoveSourceField(Boolean removeSourceField) {
+    this.removeSourceField = removeSourceField;
+  }
+
+  public Map<String, List<LSServerPostMapValues>> getPostMapValues() {
+    return postMapValues;
+  }
+
+  public void setPostMapValues(Map<String, List<LSServerPostMapValues>> postMapValues) {
+    this.postMapValues = postMapValues;
+  }
+
+  public Boolean getIsEnabled() {
+    return isEnabled;
+  }
+
+  public void setIsEnabled(Boolean isEnabled) {
+    this.isEnabled = isEnabled;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java
new file mode 100644
index 0000000..a8c4a7a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterGrok.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterGrok extends LSServerFilter {
+  @JsonProperty("log4j_format")
+  private String log4jFormat;
+
+  @JsonProperty("multiline_pattern")
+  private String multilinePattern;
+
+  @JsonProperty("message_pattern")
+  private String messagePattern;
+
+  public LSServerFilterGrok(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+    if (filterDescriptor instanceof FilterGrokDescriptor) {
+      FilterGrokDescriptor filterGrokDescriptor = (FilterGrokDescriptor)filterDescriptor;
+      this.log4jFormat = filterGrokDescriptor.getLog4jFormat();
+      this.multilinePattern = filterGrokDescriptor.getMultilinePattern();
+      this.messagePattern = filterGrokDescriptor.getMessagePattern();
+    }
+  }
+
+  public String getLog4jFormat() {
+    return log4jFormat;
+  }
+
+  public void setLog4jFormat(String log4jFormat) {
+    this.log4jFormat = log4jFormat;
+  }
+
+  public String getMultilinePattern() {
+    return multilinePattern;
+  }
+
+  public void setMultilinePattern(String multilinePattern) {
+    this.multilinePattern = multilinePattern;
+  }
+
+  public String getMessagePattern() {
+    return messagePattern;
+  }
+
+  public void setMessagePattern(String messagePattern) {
+    this.messagePattern = messagePattern;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java
new file mode 100644
index 0000000..3c0ed17
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterJson.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterJson extends LSServerFilter {
+  public LSServerFilterJson(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java
new file mode 100644
index 0000000..dcee25d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerFilterKeyValue.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerFilterKeyValue extends LSServerFilter {
+  @JsonProperty("field_split")
+  private String fieldSplit;
+
+  @JsonProperty("value_split")
+  private String valueSplit;
+
+  @JsonProperty("value_borders")
+  private String valueBorders;
+
+  public LSServerFilterKeyValue(FilterDescriptor filterDescriptor) {
+    super(filterDescriptor);
+    FilterKeyValueDescriptor filterKeyValueDescriptor = (FilterKeyValueDescriptor)filterDescriptor;
+    this.fieldSplit = filterKeyValueDescriptor.getFieldSplit();
+    this.valueSplit = filterKeyValueDescriptor.getValueSplit();
+    this.valueBorders = filterKeyValueDescriptor.getValueBorders();
+  }
+
+  public String getFieldSplit() {
+    return fieldSplit;
+  }
+
+  public void setFieldSplit(String fieldSplit) {
+    this.fieldSplit = fieldSplit;
+  }
+
+  public String getValueSplit() {
+    return valueSplit;
+  }
+
+  public void setValueSplit(String valueSplit) {
+    this.valueSplit = valueSplit;
+  }
+
+  public String getValueBorders() {
+    return valueBorders;
+  }
+
+  public void setValueBorders(String valueBorders) {
+    this.valueBorders = valueBorders;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java
new file mode 100644
index 0000000..fe83fe4
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInput.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Map;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonInclude(Include.NON_NULL)
+public abstract class LSServerInput {
+  private final String type;
+  private final String rowtype;
+  private final String path;
+  
+  @JsonProperty("add_fields")
+  private final Map<String, String> addFields;
+  
+  private final String source;
+  private final Boolean tail;
+  
+  @JsonProperty("gen_event_md5")
+  private final Boolean genEventMd5;
+  
+  @JsonProperty("use_event_md5_as_id")
+  private final Boolean useEventMd5AsId;
+  
+  @JsonProperty("start_position")
+  private final String startPosition;
+  
+  @JsonProperty("cache_enabled")
+  private final Boolean cacheEnabled;
+  
+  @JsonProperty("cache_key_field")
+  private final String cacheKeyField;
+  
+  @JsonProperty("cache_last_dedup_enabled")
+  private final Boolean cacheLastDedupEnabled;
+  
+  @JsonProperty("cache_size")
+  private final Integer cacheSize;
+  
+  @JsonProperty("cache_dedup_interval")
+  private final Long cacheDedupInterval;
+  
+  @JsonProperty("is_enabled")
+  private final Boolean isEnabled;
+  
+  public LSServerInput(InputDescriptor inputDescriptor) {
+    this.type = inputDescriptor.getType();
+    this.rowtype = inputDescriptor.getRowtype();
+    this.path = inputDescriptor.getPath();
+    this.addFields = inputDescriptor.getAddFields();
+    this.source = inputDescriptor.getSource();
+    this.tail = inputDescriptor.isTail();
+    this.genEventMd5 = inputDescriptor.isGenEventMd5();
+    this.useEventMd5AsId = inputDescriptor.isUseEventMd5AsId();
+    this.startPosition = inputDescriptor.getStartPosition();
+    this.cacheEnabled = inputDescriptor.isCacheEnabled();
+    this.cacheKeyField = inputDescriptor.getCacheKeyField();
+    this.cacheLastDedupEnabled = inputDescriptor.getCacheLastDedupEnabled();
+    this.cacheSize = inputDescriptor.getCacheSize();
+    this.cacheDedupInterval = inputDescriptor.getCacheDedupInterval();
+    this.isEnabled = inputDescriptor.isEnabled();
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public String getRowtype() {
+    return rowtype;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public Map<String, String> getAddFields() {
+    return addFields;
+  }
+
+  public String getSource() {
+    return source;
+  }
+
+  public Boolean getTail() {
+    return tail;
+  }
+
+  public Boolean getGenEventMd5() {
+    return genEventMd5;
+  }
+
+  public Boolean getUseEventMd5AsId() {
+    return useEventMd5AsId;
+  }
+
+  public String getStartPosition() {
+    return startPosition;
+  }
+
+  public Boolean getCacheEnabled() {
+    return cacheEnabled;
+  }
+
+  public String getCacheKeyField() {
+    return cacheKeyField;
+  }
+
+  public Boolean getCacheLastDedupEnabled() {
+    return cacheLastDedupEnabled;
+  }
+
+  public Integer getCacheSize() {
+    return cacheSize;
+  }
+
+  public Long getCacheDedupInterval() {
+    return cacheDedupInterval;
+  }
+
+  public Boolean getIsEnabled() {
+    return isEnabled;
+  }
+}
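
The LSServer* DTOs lean on two Jackson conventions: snake_case wire names via @JsonProperty, and @JsonInclude(Include.NON_NULL) so unset fields are dropped from the payload. A minimal, self-contained sketch of that behaviour (DemoInput is illustrative, not part of this patch):

    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.annotation.JsonInclude.Include;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    @JsonInclude(Include.NON_NULL)
    class DemoInput {
      private final String type = "hdfs_audit";

      @JsonProperty("gen_event_md5")
      private final Boolean genEventMd5 = null; // unset, so omitted from the output

      public String getType() { return type; }
      public Boolean getGenEventMd5() { return genEventMd5; }
    }

    public class DemoInputMain {
      public static void main(String[] args) throws Exception {
        // Prints {"type":"hdfs_audit"} -- the null gen_event_md5 is suppressed.
        System.out.println(new ObjectMapper().writeValueAsString(new DemoInput()));
      }
    }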
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java
new file mode 100644
index 0000000..e3dc0d1
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputConfig.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterGrokDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterJsonDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.FilterKeyValueDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputConfig;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerInputConfig {
+  @ApiModelProperty
+  private List<LSServerInput> input;
+  
+  @ApiModelProperty
+  private List<LSServerFilter> filter;
+  
+  public LSServerInputConfig(InputConfig inputConfig) {
+    input = new ArrayList<>();
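+    // Dispatch on the concrete descriptor type; descriptors of any other type are silently skipped.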
+    for (InputDescriptor inputDescriptor : inputConfig.getInput()) {
+      if (inputDescriptor instanceof InputFileBaseDescriptor) {
+        LSServerInput inputItem = new LSServerInputFile(inputDescriptor);
+        input.add(inputItem);
+      } else if (inputDescriptor instanceof InputS3FileDescriptor) {
+        LSServerInput inputItem = new LSServerInputS3File(inputDescriptor);
+        input.add(inputItem);
+      }
+    }
+    
+    filter = new ArrayList<>();
+    for (FilterDescriptor filterDescriptor : inputConfig.getFilter()) {
+      if (filterDescriptor instanceof FilterGrokDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterGrok(filterDescriptor);
+        filter.add(filterItem);
+      } else if (filterDescriptor instanceof FilterKeyValueDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterKeyValue(filterDescriptor);
+        filter.add(filterItem);
+      } else if (filterDescriptor instanceof FilterJsonDescriptor) {
+        LSServerFilter filterItem = new LSServerFilterJson(filterDescriptor);
+        filter.add(filterItem);
+      }
+    }
+  }
+
+  public List<LSServerInput> getInput() {
+    return input;
+  }
+
+  public void setInput(List<LSServerInput> input) {
+    this.input = input;
+  }
+
+  public List<LSServerFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(List<LSServerFilter> filter) {
+    this.filter = filter;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java
new file mode 100644
index 0000000..5c547ad
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFile.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerInputFile extends LSServerInputFileBase {
+  public LSServerInputFile(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java
new file mode 100644
index 0000000..df21d0d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputFileBase.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputFileBaseDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public abstract class LSServerInputFileBase extends LSServerInput {
+  @JsonProperty("checkpoint_interval_ms")
+  private Integer checkpointIntervalMs;
+
+  @JsonProperty("process_file")
+  private Boolean processFile;
+
+  @JsonProperty("copy_file")
+  private Boolean copyFile;
+  
+  public LSServerInputFileBase(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+    
+    InputFileBaseDescriptor inputFileBaseDescriptor = (InputFileBaseDescriptor)inputDescriptor;
+    this.checkpointIntervalMs = inputFileBaseDescriptor.getCheckpointIntervalMs();
+    this.processFile = inputFileBaseDescriptor.getProcessFile();
+    this.copyFile = inputFileBaseDescriptor.getCopyFile();
+  }
+
+  public Integer getCheckpointIntervalMs() {
+    return checkpointIntervalMs;
+  }
+
+  public void setCheckpointIntervalMs(Integer checkpointIntervalMs) {
+    this.checkpointIntervalMs = checkpointIntervalMs;
+  }
+
+  public Boolean getProcessFile() {
+    return processFile;
+  }
+
+  public void setProcessFile(Boolean processFile) {
+    this.processFile = processFile;
+  }
+
+  public Boolean getCopyFile() {
+    return copyFile;
+  }
+
+  public void setCopyFile(Boolean copyFile) {
+    this.copyFile = copyFile;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java
new file mode 100644
index 0000000..8e9acf0
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerInputS3File.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.InputS3FileDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerInputS3File extends LSServerInputFileBase {
+  @JsonProperty("s3_access_key")
+  private String s3AccessKey;
+  
+  @JsonProperty("s3_secret_key")
+  private String s3SecretKey;
+  
+  public LSServerInputS3File(InputDescriptor inputDescriptor) {
+    super(inputDescriptor);
+    InputS3FileDescriptor inputS3FileDescriptor = (InputS3FileDescriptor)inputDescriptor;
+    this.s3AccessKey = inputS3FileDescriptor.getS3AccessKey();
+    this.s3SecretKey = inputS3FileDescriptor.getS3SecretKey();
+  }
+
+  public String getS3AccessKey() {
+    return s3AccessKey;
+  }
+
+  public void setS3AccessKey(String s3AccessKey) {
+    this.s3AccessKey = s3AccessKey;
+  }
+
+  public String getS3SecretKey() {
+    return s3SecretKey;
+  }
+
+  public void setS3SecretKey(String s3SecretKey) {
+    this.s3SecretKey = s3SecretKey;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java
new file mode 100644
index 0000000..2a00802
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Date;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerLogLevelFilter {
+
+  @ApiModelProperty private String label;
+  @ApiModelProperty private List<String> hosts;
+  @ApiModelProperty private List<String> defaultLevels;
+  @ApiModelProperty private List<String> overrideLevels;
+  @ApiModelProperty private Date expiryTime;
+
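+  // No-arg constructor lets Jackson instantiate this DTO when binding request bodies.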
+  public LSServerLogLevelFilter() {}
+
+  public LSServerLogLevelFilter(LogLevelFilter logLevelFilter) {
+    label = logLevelFilter.getLabel();
+    hosts = logLevelFilter.getHosts();
+    defaultLevels = logLevelFilter.getDefaultLevels();
+    overrideLevels = logLevelFilter.getOverrideLevels();
+    expiryTime = logLevelFilter.getExpiryTime();
+  }
+
+  public String getLabel() {
+    return label;
+  }
+
+  public void setLabel(String label) {
+    this.label = label;
+  }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  public void setHosts(List<String> hosts) {
+    this.hosts = hosts;
+  }
+
+  public List<String> getDefaultLevels() {
+    return defaultLevels;
+  }
+
+  public void setDefaultLevels(List<String> defaultLevels) {
+    this.defaultLevels = defaultLevels;
+  }
+
+  public List<String> getOverrideLevels() {
+    return overrideLevels;
+  }
+
+  public void setOverrideLevels(List<String> overrideLevels) {
+    this.overrideLevels = overrideLevels;
+  }
+
+  public Date getExpiryTime() {
+    return expiryTime;
+  }
+
+  public void setExpiryTime(Date expiryTime) {
+    this.expiryTime = expiryTime;
+  }
+
+  public LogLevelFilter convertToApi() {
+    LogLevelFilter apiFilter = new LogLevelFilter();
+    
+    apiFilter.setLabel(label);
+    apiFilter.setHosts(hosts);
+    apiFilter.setDefaultLevels(defaultLevels);
+    apiFilter.setOverrideLevels(overrideLevels);
+    apiFilter.setExpiryTime(expiryTime);
+    
+    return apiFilter;
+  }
+}
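
LSServerLogLevelFilter is a plain REST-facing DTO; convertToApi() copies its five fields onto the config-api LogLevelFilter. A short usage sketch, assuming the logsearch-server and config-api classes are on the classpath:

    import java.util.Arrays;
    import java.util.Date;

    import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
    import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilter;

    public class LogLevelFilterDemo {
      public static void main(String[] args) {
        LSServerLogLevelFilter dto = new LSServerLogLevelFilter();
        dto.setLabel("ambari_server");
        dto.setHosts(Arrays.asList("c6401.ambari.apache.org"));
        dto.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN"));
        dto.setOverrideLevels(Arrays.asList("DEBUG"));
        dto.setExpiryTime(new Date());

        LogLevelFilter api = dto.convertToApi(); // copies all five fields
        System.out.println(api.getLabel());      // ambari_server
      }
    }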
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java
new file mode 100644
index 0000000..3088db1
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerLogLevelFilterMap {
+
+  @ApiModelProperty
+  private TreeMap<String, LSServerLogLevelFilter> filter;
+
+  public LSServerLogLevelFilterMap() {}
+
+  public LSServerLogLevelFilterMap(LogLevelFilterMap logLevelFilterMap) {
+    filter = new TreeMap<>();
+    for (Map.Entry<String, LogLevelFilter> e : logLevelFilterMap.getFilter().entrySet()) {
+      filter.put(e.getKey(), new LSServerLogLevelFilter(e.getValue()));
+    }
+  }
+
+  public TreeMap<String, LSServerLogLevelFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(TreeMap<String, LSServerLogLevelFilter> filter) {
+    this.filter = filter;
+  }
+
+  public LogLevelFilterMap convertToApi() {
+    LogLevelFilterMap logLevelFilterMap = new LogLevelFilterMap();
+
+    TreeMap<String, LogLevelFilter> apiFilter = new TreeMap<>();
+    for (Map.Entry<String, LSServerLogLevelFilter> e : filter.entrySet()) {
+      apiFilter.put(e.getKey(), e.getValue().convertToApi());
+    }
+    logLevelFilterMap.setFilter(apiFilter);
+
+    return logLevelFilterMap;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java
new file mode 100644
index 0000000..dcacceb
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapDate.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonInclude(Include.NON_NULL)
+public class LSServerMapDate extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_date";
+  }
+
+  @JsonProperty("source_date_pattern")
+  private String sourceDatePattern;
+
+  @JsonProperty("target_date_pattern")
+  private String targetDatePattern;
+
+  public LSServerMapDate(MapDateDescriptor mapDateDescriptor) {
+    this.sourceDatePattern = mapDateDescriptor.getSourceDatePattern();
+    this.targetDatePattern = mapDateDescriptor.getTargetDatePattern();
+  }
+
+  public String getSourceDatePattern() {
+    return sourceDatePattern;
+  }
+
+  public void setSourceDatePattern(String sourceDatePattern) {
+    this.sourceDatePattern = sourceDatePattern;
+  }
+
+  public String getTargetDatePattern() {
+    return targetDatePattern;
+  }
+
+  public void setTargetDatePattern(String targetDatePattern) {
+    this.targetDatePattern = targetDatePattern;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java
new file mode 100644
index 0000000..b18439c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapField.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonIgnoreProperties(value = { "name" })
+public abstract class LSServerMapField {
+  public abstract String getName();
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java
new file mode 100644
index 0000000..b0bea83
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldCopy.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldCopy extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldcopy";
+  }
+
+  @JsonProperty("copy_name")
+  private String copyName;
+
+  public LSServerMapFieldCopy(MapFieldCopyDescriptor mapFieldCopyDescriptor) {
+    this.copyName = mapFieldCopyDescriptor.getCopyName();
+  }
+
+  public String getCopyName() {
+    return copyName;
+  }
+
+  public void setCopyName(String copyName) {
+    this.copyName = copyName;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java
new file mode 100644
index 0000000..000b29d
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldName.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldName extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldname";
+  }
+
+  @JsonProperty("new_field_name")
+  private String newFieldName;
+
+  public LSServerMapFieldName(MapFieldNameDescriptor mapFieldNameDescriptor) {
+    this.newFieldName = mapFieldNameDescriptor.getNewFieldName();
+  }
+
+  public String getNewFieldName() {
+    return newFieldName;
+  }
+
+  public void setNewFieldName(String newFieldName) {
+    this.newFieldName = newFieldName;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java
new file mode 100644
index 0000000..6152de5
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerMapFieldValue.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+public class LSServerMapFieldValue extends LSServerMapField {
+  @Override
+  public String getName() {
+    return "map_fieldvalue";
+  }
+
+  @JsonProperty("pre_value")
+  private String preValue;
+
+  @JsonProperty("post_value")
+  private String postValue;
+
+  public LSServerMapFieldValue(MapFieldValueDescriptor mapFieldValueDescriptor) {
+    this.preValue = mapFieldValueDescriptor.getPreValue();
+    this.postValue = mapFieldValueDescriptor.getPostValue();
+  }
+
+  public String getPreValue() {
+    return preValue;
+  }
+
+  public void setPreValue(String preValue) {
+    this.preValue = preValue;
+  }
+
+  public String getPostValue() {
+    return postValue;
+  }
+
+  public void setPostValue(String postValue) {
+    this.postValue = postValue;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java
new file mode 100644
index 0000000..5f361c9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValues.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapDateDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldCopyDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldNameDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.MapFieldValueDescriptor;
+import org.apache.ambari.logsearch.config.api.model.inputconfig.PostMapValues;
+
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+import io.swagger.annotations.ApiModel;
+
+@ApiModel
+@JsonSerialize(using = LSServerPostMapValuesSerializer.class)
+public class LSServerPostMapValues {
+  private List<LSServerMapField> mappers;
+  
+  public LSServerPostMapValues(PostMapValues pmv) {
+    mappers = new ArrayList<>();
+    for (MapFieldDescriptor mapFieldDescriptor : pmv.getMappers()) {
+      if (mapFieldDescriptor instanceof MapDateDescriptor) {
+        mappers.add(new LSServerMapDate((MapDateDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldCopyDescriptor) {
+        mappers.add(new LSServerMapFieldCopy((MapFieldCopyDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldNameDescriptor) {
+        mappers.add(new LSServerMapFieldName((MapFieldNameDescriptor)mapFieldDescriptor));
+      } else if (mapFieldDescriptor instanceof MapFieldValueDescriptor) {
+        mappers.add(new LSServerMapFieldValue((MapFieldValueDescriptor)mapFieldDescriptor));
+      }
+    }
+  }
+
+  public List<LSServerMapField> getMappers() {
+    return mappers;
+  }
+
+  public void setMappers(List<LSServerMapField> mappers) {
+    this.mappers = mappers;
+  }
+}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java
new file mode 100644
index 0000000..7543677
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerPostMapValuesSerializer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.io.IOException;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+public class LSServerPostMapValuesSerializer extends JsonSerializer<LSServerPostMapValues> {
+  @Override
+  public void serialize(LSServerPostMapValues value, JsonGenerator jgen, SerializerProvider provider)
+      throws IOException, JsonProcessingException {
+    jgen.writeStartObject();
+    for (LSServerMapField mapField : value.getMappers()) {
+      jgen.writeObjectField(mapField.getName(), mapField);
+    }
+    jgen.writeEndObject();
+  }
+}
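
The serializer flattens the mappers list into a single JSON object keyed by each mapper's logical name (map_date, map_fieldcopy, ...), which is also why LSServerMapField excludes the name property from normal serialization. A self-contained sketch of the resulting shape (DemoSerializer and its sample values are illustrative, not from this patch):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.Map;

    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.databind.JsonSerializer;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializerProvider;
    import com.fasterxml.jackson.databind.annotation.JsonSerialize;

    @JsonSerialize(using = PostMapValuesShapeDemo.DemoSerializer.class)
    public class PostMapValuesShapeDemo {
      // One "mapper": its logical name and its properties.
      final String name = "map_date";
      final Map<String, String> props =
          Collections.singletonMap("target_date_pattern", "yyyy-MM-dd HH:mm:ss");

      static class DemoSerializer extends JsonSerializer<PostMapValuesShapeDemo> {
        @Override
        public void serialize(PostMapValuesShapeDemo value, JsonGenerator jgen,
            SerializerProvider provider) throws IOException {
          jgen.writeStartObject();
          jgen.writeObjectField(value.name, value.props); // the mapper name becomes the JSON key
          jgen.writeEndObject();
        }
      }

      public static void main(String[] args) throws Exception {
        // Prints {"map_date":{"target_date_pattern":"yyyy-MM-dd HH:mm:ss"}}
        System.out.println(new ObjectMapper().writeValueAsString(new PostMapValuesShapeDemo()));
      }
    }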
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java
deleted file mode 100644
index cc7d53d..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.model.common;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.TreeMap;
-
-@ApiModel
-public class LogFeederDataMap {
-
-  @ApiModelProperty
-  private String id;
-
-  @ApiModelProperty
-  private TreeMap<String, LogfeederFilterData> filter;
-
-  public TreeMap<String, LogfeederFilterData> getFilter() {
-    return filter;
-  }
-
-  public void setFilter(TreeMap<String, LogfeederFilterData> filter) {
-    this.filter = filter;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java
deleted file mode 100644
index e0f8013..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.model.common;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.ArrayList;
-import java.util.List;
-
-@ApiModel
-public class LogfeederFilterData {
-
-  @ApiModelProperty
-  private String label;
-
-  @ApiModelProperty
-  private List<String> hosts = new ArrayList<>();
-
-  @ApiModelProperty
-  private List<String> defaultLevels = new ArrayList<>();
-
-  @ApiModelProperty
-  private List<String> overrideLevels = new ArrayList<>();
-
-  @ApiModelProperty
-  private String expiryTime;
-
-  public LogfeederFilterData() {
-  }
-
-  public String getLabel() {
-    return label;
-  }
-
-  public void setLabel(String label) {
-    this.label = label;
-  }
-
-  public List<String> getHosts() {
-    return hosts;
-  }
-
-  public void setHosts(List<String> hosts) {
-    this.hosts = hosts;
-  }
-
-  public List<String> getDefaultLevels() {
-    return defaultLevels;
-  }
-
-  public void setDefaultLevels(List<String> defaultLevels) {
-    this.defaultLevels = defaultLevels;
-  }
-
-  public List<String> getOverrideLevels() {
-    return overrideLevels;
-  }
-
-  public void setOverrideLevels(List<String> overrideLevels) {
-    this.overrideLevels = overrideLevels;
-  }
-
-  public String getExpiryTime() {
-    return expiryTime;
-  }
-
-  public void setExpiryTime(String expiryTime) {
-    this.expiryTime = expiryTime;
-  }
-}
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
index 00bf07c..5312da8 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/AuditLogsResource.java
@@ -44,6 +44,8 @@
 import org.apache.ambari.logsearch.manager.AuditLogsManager;
 import org.springframework.context.annotation.Scope;
 
+import java.util.List;
+
 import static org.apache.ambari.logsearch.doc.DocConstants.AuditOperationDescriptions.*;
 
 @Api(value = "audit/logs", description = "Audit log operations")
@@ -117,4 +119,12 @@
     return auditLogsManager.getServiceLoad(request);
   }
 
+  @GET
+  @Path("/clusters")
+  @Produces({"application/json"})
+  @ApiOperation(GET_AUDIT_CLUSTERS_OD)
+  public List<String> getClustersForAuditLog() {
+    return auditLogsManager.getClusters();
+  }
+
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
index 498da69..e02acb8 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ServiceLogsResource.java
@@ -58,6 +58,8 @@
 import org.apache.ambari.logsearch.manager.ServiceLogsManager;
 import org.springframework.context.annotation.Scope;
 
+import java.util.List;
+
 import static org.apache.ambari.logsearch.doc.DocConstants.ServiceOperationDescriptions.*;
 
 @Api(value = "service/logs", description = "Service log operations")
@@ -219,4 +221,13 @@
   public HostLogFilesResponse getHostLogFiles(@Valid @BeanParam HostLogFilesRequest request) {
     return serviceLogsManager.getHostLogFileData(request);
   }
+
+  @GET
+  @Path("/clusters")
+  @Produces({"application/json"})
+  @ApiOperation(GET_SERVICE_CLUSTERS_OD)
+  public List<String> getClustersForServiceLog() {
+    return serviceLogsManager.getClusters();
+  }
+
 }
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
new file mode 100644
index 0000000..a7d99c9
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.rest;
+
+import javax.inject.Inject;
+import javax.inject.Named;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiOperation;
+
+import org.apache.ambari.logsearch.manager.ShipperConfigManager;
+import org.apache.ambari.logsearch.model.common.LSServerInputConfig;
+import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
+import org.springframework.context.annotation.Scope;
+
+import java.util.List;
+
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_LOG_LEVEL_FILTER_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_SERVICE_NAMES_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_SHIPPER_CONFIG_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.SET_SHIPPER_CONFIG_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.UPDATE_LOG_LEVEL_FILTER_OD;
+
+@Api(value = "shipper", description = "Shipper config operations")
+@Path("shipper")
+@Named
+@Scope("request")
+public class ShipperConfigResource {
+
+  @Inject
+  private ShipperConfigManager shipperConfigManager;
+
+  @GET
+  @Path("/input/{clusterName}/services")
+  @Produces({"application/json"})
+  @ApiOperation(GET_SERVICE_NAMES_OD)
+  public List<String> getServices(@PathParam("clusterName") String clusterName) {
+    return shipperConfigManager.getServices(clusterName);
+  }
+
+  @GET
+  @Path("/input/{clusterName}/services/{serviceName}")
+  @Produces({"application/json"})
+  @ApiOperation(GET_SHIPPER_CONFIG_OD)
+  public LSServerInputConfig getShipperConfig(@PathParam("clusterName") String clusterName,
+      @PathParam("serviceName") String serviceName) {
+    return shipperConfigManager.getInputConfig(clusterName, serviceName);
+  }
+
+  @POST
+  @Path("/input/{clusterName}/services/{serviceName}")
+  @Produces({"application/json"})
+  @ApiOperation(SET_SHIPPER_CONFIG_OD)
+  public Response createShipperConfig(String body, @PathParam("clusterName") String clusterName,
+      @PathParam("serviceName") String serviceName) {
+    return shipperConfigManager.createInputConfig(clusterName, serviceName, body);
+  }
+
+  @PUT
+  @Path("/input/{clusterName}/services/{serviceName}")
+  @Produces({"application/json"})
+  @ApiOperation(SET_SHIPPER_CONFIG_OD)
+  public Response setShipperConfig(String body, @PathParam("clusterName") String clusterName,
+      @PathParam("serviceName") String serviceName) {
+    return shipperConfigManager.setInputConfig(clusterName, serviceName, body);
+  }
+
+  @GET
+  @Path("/filters/{clusterName}/level")
+  @Produces({"application/json"})
+  @ApiOperation(GET_LOG_LEVEL_FILTER_OD)
+  public LSServerLogLevelFilterMap getLogLevelFilters(@PathParam("clusterName") String clusterName) {
+    return shipperConfigManager.getLogLevelFilters(clusterName);
+  }
+
+  @PUT
+  @Path("/filters/{clusterName}/level")
+  @Produces({"application/json"})
+  @ApiOperation(UPDATE_LOG_LEVEL_FILTER_OD)
+  public Response setLogLevelFilter(LSServerLogLevelFilterMap request, @PathParam("clusterName") String clusterName) {
+    return shipperConfigManager.setLogLevelFilters(clusterName, request);
+  }
+
+}
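
A hedged sketch of calling the new shipper endpoints from plain Java; the host, port (61888) and the api/v1 base path are assumptions, not taken from this patch:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ShipperConfigClientDemo {
      public static void main(String[] args) throws Exception {
        // List the service names that have shipper configs in cluster "cl1".
        URL url = new URL("http://localhost:61888/api/v1/shipper/input/cl1/services");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // e.g. ["ambari_server","hdfs_audit"]
          }
        }
      }
    }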
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
index 41dda05..00b971a 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
@@ -25,7 +25,6 @@
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
@@ -33,7 +32,6 @@
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
 import org.apache.ambari.logsearch.manager.UserConfigManager;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
 import org.apache.ambari.logsearch.model.request.impl.UserConfigRequest;
 import org.apache.ambari.logsearch.model.response.UserConfigData;
 import org.apache.ambari.logsearch.model.response.UserConfigDataListResponse;
@@ -74,22 +72,6 @@
   }
 
   @GET
-  @Path("/filters")
-  @Produces({"application/json"})
-  @ApiOperation(GET_USER_FILTER_OD)
-  public LogFeederDataMap getUserFilter() {
-    return userConfigManager.getUserFilter();
-  }
-
-  @PUT
-  @Path("/filters")
-  @Produces({"application/json"})
-  @ApiOperation(UPDATE_USER_FILTER_OD)
-  public LogFeederDataMap updateUserFilter(LogFeederDataMap request) {
-    return userConfigManager.saveUserFiter(request);
-  }
-
-  @GET
   @Path("/names")
   @Produces({"application/json"})
   @ApiOperation(GET_ALL_USER_NAMES_OD)
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogSearchConfigStateFilter.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogSearchConfigStateFilter.java
new file mode 100644
index 0000000..9b6cdfe
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogSearchConfigStateFilter.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.web.filters;
+
+import java.io.IOException;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.ambari.logsearch.common.MessageEnums;
+import org.apache.ambari.logsearch.common.VResponse;
+import org.apache.ambari.logsearch.conf.global.LogSearchConfigState;
+import org.apache.ambari.logsearch.util.RESTErrorUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.security.web.util.matcher.RequestMatcher;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+/**
+ * Filter to decide whether the server is ready to serve requests that require the Log Search configuration to be available.
+ */
+public class LogSearchConfigStateFilter implements Filter {
+  private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigStateFilter.class);
+
+  private static final String CONFIG_NOT_AVAILABLE = "Configuration is not available";
+
+  private final RequestMatcher requestMatcher;
+  private final LogSearchConfigState logSearchConfigState;
+  
+  public LogSearchConfigStateFilter(RequestMatcher requestMatcher, LogSearchConfigState logSearchConfigState) {
+    this.requestMatcher = requestMatcher;
+    this.logSearchConfigState = logSearchConfigState;
+  }
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+  }
+
+  @Override
+  public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
+      throws IOException, ServletException {
+    HttpServletRequest request = (HttpServletRequest) servletRequest;
+    if (requestMatcher.matches(request)) {
+      VResponse errorResponse = getErrorResponse();
+      if (errorResponse != null) {
+        LOG.info("{} request is filtered out: {}", request.getRequestURL(), errorResponse.getMsgDesc());
+        HttpServletResponse resp = (HttpServletResponse) servletResponse;
+        resp.setStatus(500);
+        resp.setContentType("application/json");
+        resp.getWriter().print(createStringFromErrorMessageObject(errorResponse));
+        return;
+      }
+    }
+    
+    filterChain.doFilter(servletRequest, servletResponse);
+  }
+
+  private VResponse getErrorResponse() {
+    if (!logSearchConfigState.isLogSearchConfigAvailable()) {
+      return RESTErrorUtil.createMessageResponse(CONFIG_NOT_AVAILABLE, MessageEnums.CONFIGURATION_NOT_AVAILABLE);
+    }
+    
+    return null;
+  }
+
+  private String createStringFromErrorMessageObject(VResponse responseObject) {
+    try {
+      ObjectMapper mapper = new ObjectMapper();
+      return mapper.writeValueAsString(responseObject);
+    } catch (Exception e) {
+      throw RESTErrorUtil.createRESTException("Cannot parse response object on backend", MessageEnums.ERROR_CREATING_OBJECT);
+    }
+  }
+
+  @Override
+  public void destroy() {
+  }
+}
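
A sketch of how the filter might be wired up; the URL pattern is illustrative, not taken from this patch. Requests matching it are answered with HTTP 500 and a JSON error body until LogSearchConfigState reports the configuration as available:

    import org.apache.ambari.logsearch.conf.global.LogSearchConfigState;
    import org.apache.ambari.logsearch.web.filters.LogSearchConfigStateFilter;
    import org.springframework.security.web.util.matcher.AntPathRequestMatcher;

    public class ConfigStateFilterWiring {
      public static LogSearchConfigStateFilter build(LogSearchConfigState state) {
        // Guard the shipper config endpoints behind the "config loaded" state flag.
        return new LogSearchConfigStateFilter(new AntPathRequestMatcher("/api/v1/shipper/**"), state);
      }
    }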
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
index e23f0a2..1dab126 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/security/LogsearchExternalServerAuthenticationProvider.java
@@ -122,8 +122,9 @@
 
     List<String> values = new ArrayList<>();
     JSONUtil.getValuesOfKey(responseJson, PrivilegeInfo.PERMISSION_NAME.toString(), values);
-    if (values.isEmpty())
-      return true;
+    if (values.isEmpty()) {
+      return false;
+    }
     
     if (allowedRoleList.length > 0 && responseJson != null) {
       for (String allowedRole : allowedRoleList) {
diff --git a/ambari-logsearch/ambari-logsearch-web/.gitignore b/ambari-logsearch/ambari-logsearch-web/.gitignore
new file mode 100644
index 0000000..ae3c172
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/.gitignore
@@ -0,0 +1 @@
+/bin/
diff --git a/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html b/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
index 5f1bbdb..24cc392 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
+++ b/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
@@ -34,11 +34,12 @@
                         <i class="fa fa-filter  pull-right"></i>
                     </a>
                 </li> -->
-                 <li class="dropdown" data-id="createFilters" title="Logfeeder Filters">
+<!-- TODO: update filters to support multiple clusters
+                <li class="dropdown" data-id="createFilters" title="Logfeeder Filters">
                     <a href="#" class="account excludeStatus" data-toggle="modal">
                         <i class="fa fa-filter"></i>
                     </a>
-                </li>
+                </li>-->
                 <li class="dropdown" title="Menu">
                     <a href="#" class="dropdown-toggle account" data-toggle="dropdown">
                         <!-- <div class="avatar">
diff --git a/ambari-logsearch/docker/Dockerfile b/ambari-logsearch/docker/Dockerfile
index 6e8ea3e..d399fc6 100644
--- a/ambari-logsearch/docker/Dockerfile
+++ b/ambari-logsearch/docker/Dockerfile
@@ -15,17 +15,22 @@
 RUN echo root:changeme | chpasswd
 
 RUN yum clean all -y && yum update -y
-RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git python-setuptools python-devel httpd lsof
+RUN yum -y install firefox-45.8.0-2.el6.centos xvfb xeyes vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git python-setuptools python-devel httpd lsof
 RUN rpm -e --nodeps --justdb glibc-common
 RUN yum -y install glibc-common
 
 ENV HOME /root
 
 #Install JAVA
-RUN wget --no-check-certificate --no-cookies --header "Cookie:oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-x64.rpm -O jdk-7u55-linux-x64.rpm
-RUN rpm -ivh jdk-7u55-linux-x64.rpm
+ENV JAVA_VERSION 8u31
+ENV BUILD_VERSION b13
+RUN wget --no-cookies --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/$JAVA_VERSION-$BUILD_VERSION/jdk-$JAVA_VERSION-linux-x64.rpm" -O jdk-8-linux-x64.rpm
+RUN rpm -ivh jdk-8-linux-x64.rpm
 ENV JAVA_HOME /usr/java/default/
 
+#Install Selenium server
+RUN wget --no-check-certificate -O /root/selenium-server-standalone.jar http://selenium-release.storage.googleapis.com/2.53/selenium-server-standalone-2.53.1.jar
+
 #Install Maven
 RUN mkdir -p /opt/maven
 WORKDIR /opt/maven
@@ -34,7 +39,7 @@
 RUN rm -rf /opt/maven/apache-maven-3.0.5-bin.tar.gz
 
 ENV M2_HOME /opt/maven/apache-maven-3.0.5
-ENV MAVEN_OPTS -Xmx2048m -XX:MaxPermSize=256m
+ENV MAVEN_OPTS -Xmx2048m
 ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
 
 # SSH key
@@ -42,6 +47,8 @@
 RUN cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
 RUN chmod 600 /root/.ssh/authorized_keys
 RUN sed -ri 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
+# Allow X11 forwarding so the browser-based tests can display through the host X server.
+# Note: the original lines were missing the append redirection, so they only echoed the strings.
+RUN echo 'X11Forwarding yes' >> /etc/ssh/sshd_config
+RUN echo 'X11DisplayOffset 10' >> /etc/ssh/sshd_config
 
 #To allow bower install behind proxy. See https://github.com/bower/bower/issues/731
 RUN git config --global url."https://".insteadOf git://
@@ -54,7 +61,7 @@
 
 # Install Solr
 ENV SOLR_VERSION 5.5.2
-RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz http://archive.apache.org/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
+RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz http://public-repo-1.hortonworks.com/ARTIFACTS/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
 RUN cd /root && tar -zxvf /root/solr-$SOLR_VERSION.tgz
 ADD bin/start.sh /root/start.sh
 ADD test-config /root/test-config
diff --git a/ambari-logsearch/docker/bin/start.sh b/ambari-logsearch/docker/bin/start.sh
index 1efc85c..28ebf65 100644
--- a/ambari-logsearch/docker/bin/start.sh
+++ b/ambari-logsearch/docker/bin/start.sh
@@ -92,6 +92,10 @@
   touch /var/log/ambari-logsearch-logfeeder/logsearch-logfeeder.log
 }
 
+function start_selenium_server() {
+  nohup java -jar /root/selenium-server-standalone.jar > /var/log/selenium-test.log &
+}
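+
+# (illustrative) follow the Selenium server log with COMPONENT_LOG=selenium, or directly:
+#   docker exec logsearch tail -f /var/log/selenium-test.log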
+
 function log() {
   component_log=${COMPONENT_LOG:-"logsearch"}
   case $component_log in
@@ -101,6 +105,9 @@
     "solr")
       tail -f /var/log/ambari-logsearch-solr/solr.log
      ;;
+    "selenium")
+      tail -f /var/log/selenium-test.log
+     ;;
      *)
       tail -f /var/log/ambari-logsearch-portal/logsearch-app.log
      ;;
@@ -109,6 +116,7 @@
 
 create_config
 generate_keys
+start_selenium_server
 start_solr
 start_logsearch
 start_logfeeder
diff --git a/ambari-logsearch/docker/logsearch-docker.sh b/ambari-logsearch/docker/logsearch-docker.sh
index 1fdcd8c..4d53fa1 100755
--- a/ambari-logsearch/docker/logsearch-docker.sh
+++ b/ambari-logsearch/docker/logsearch-docker.sh
@@ -30,15 +30,23 @@
   popd
 }
 
+function get_docker_ip() {
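+  # NOTE: 'en0' assumes a macOS docker host; on Linux substitute the primary interface (e.g. eth0)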
+  local ip=$(ifconfig en0 | grep inet | awk '$1=="inet" {print $2}')
+  echo $ip
+}
+
 function start_logsearch_container() {
   setup_profile
   source $sdir/Profile
-  : ${AMBARI_LOCATION:?"Please set the AMBARI_LOCATION in Profile"}
+  pushd $sdir/../../
+  local AMBARI_LOCATION=$(pwd)
+  popd
   : ${MAVEN_REPOSITORY_LOCATION:?"Please set the MAVEN_REPOSITORY_LOCATION in Profile"}
   kill_logsearch_container
+  local docker_ip=$(get_docker_ip)
   echo "Run Log Search container"
-  docker run -d --name logsearch --hostname logsearch.apache.org \
-    -v $AMBARI_LOCATION:/root/ambari -v $MAVEN_REPOSITORY_LOCATION:/root/.m2 $LOGSEARCH_EXPOSED_PORTS $LOGSEARCH_ENV_OPTS $LOGSEARCH_EXTRA_OPTS $LOGSEARCH_VOLUME_OPTS \
+  docker run -d --name logsearch --hostname logsearch.apache.org -e DISPLAY=$docker_ip:0 \
+    -v $AMBARI_LOCATION:/root/ambari -v $MAVEN_REPOSITORY_LOCATION:/root/.m2 $LOGSEARCH_EXPOSED_PORTS $LOGSEARCH_ENV_OPTS $LOGSEARCH_EXTRA_OPTS $LOGSEARCH_VOLUME_OPTS -p 9983:9983 -p 4444:4444 -p 5910:5910 \
     -v $AMBARI_LOCATION/ambari-logsearch/ambari-logsearch-logfeeder/target/classes:/root/ambari/ambari-logsearch/ambari-logsearch-logfeeder/target/package/classes \
     -v $AMBARI_LOCATION/ambari-logsearch/ambari-logsearch-server/target/classes:/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes \
     -v $AMBARI_LOCATION/ambari-logsearch/ambari-logsearch-web/src/main/webapp:/root/ambari/ambari-logsearch/ambari-logsearch-server/target/package/classes/webapps/app \
@@ -55,8 +63,10 @@
     echo "Profile file exists"
   else
     echo "Profile does not exist, Creating a new one..."
+    pushd $sdir/../../
+    local AMBARI_LOCATION=$(pwd)
+    popd
     cat << EOF > $sdir/Profile
-AMBARI_LOCATION=$HOME/prj/ambari
 MAVEN_REPOSITORY_LOCATION=$HOME/.m2
 LOGSEARCH_EXPOSED_PORTS="-p 8886:8886 -p 61888:61888 -p 5005:5005 -p 5006:5006"
 LOGSEARCH_ENV_OPTS="-e LOGFEEDER_DEBUG_SUSPEND=n -e LOGSEARCH_DEBUG_SUSPEND=n -e COMPONENT_LOG=logsearch -e LOGSEARCH_HTTPS_ENABLED=false -e LOGSEARCH_SOLR_SSL_ENABLED=false -e GENERATE_KEYSTORE_AT_START=false"
diff --git a/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties b/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
index 068bc3a..fb7ddf2 100644
--- a/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
+++ b/ambari-logsearch/docker/test-config/logfeeder/logfeeder.properties
@@ -13,17 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+cluster.name=cl1
 logfeeder.checkpoint.folder=/root/checkpoints
 logfeeder.metrics.collector.hosts=
+logfeeder.config.dir=/root/test-config/logfeeder/shipper-conf/
 logfeeder.config.files=shipper-conf/global.config.json,\
-  shipper-conf/output.config.json,\
-  shipper-conf/input.config-zookeeper.json,\
-  shipper-conf/input.config-logsearch.json,\
-  shipper-conf/input.config-hst.json,\
-  shipper-conf/input.config-system_message.json,\
-  shipper-conf/input.config-secure_log.json,\
-  shipper-conf/input.config-hdfs.json,\
-  shipper-conf/input.config-ambari.json
+  shipper-conf/output.config.json
 logfeeder.log.filter.enable=true
 logfeeder.solr.config.interval=5
 logfeeder.solr.core.config.name=history
@@ -33,3 +28,5 @@
 logfeeder.cache.key.field=log_message
 logfeeder.cache.dedup.interval=1000
 logfeeder.cache.last.dedup.enabled=true
+logsearch.config.zk_connect_string=localhost:9983
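+# localhost:9983 is presumably the embedded ZooKeeper that Solr runs in this docker setup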
+logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN
diff --git a/ambari-logsearch/docker/test-config/logsearch/logsearch.properties b/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
index cfa985d..5bde17c 100644
--- a/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
+++ b/ambari-logsearch/docker/test-config/logsearch/logsearch.properties
@@ -43,10 +43,6 @@
 logsearch.solr.metrics.collector.hosts=
 logsearch.solr.jmx.port=18886
 
-# Logfeeder Settings
-
-logsearch.logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN
-
 # logsearch-admin.json
 logsearch.auth.file.enable=true
 logsearch.login.credentials.file=user_pass.json
@@ -56,3 +52,5 @@
 logsearch.auth.external_auth.enable=false
 
 logsearch.protocol=http
+
+logsearch.config.zk_connect_string=localhost:9983
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index 1e63ced..6f78abe 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -34,6 +34,8 @@
     <module>ambari-logsearch-server</module>
     <module>ambari-logsearch-web</module>
     <module>ambari-logsearch-logfeeder</module>
+    <module>ambari-logsearch-config-api</module>
+    <module>ambari-logsearch-config-zookeeper</module>
     <module>ambari-logsearch-it</module>
   </modules>
   <properties>
diff --git a/ambari-metrics/ambari-metrics-assembly/pom.xml b/ambari-metrics/ambari-metrics-assembly/pom.xml
index a4b87de..6b81de5 100644
--- a/ambari-metrics/ambari-metrics-assembly/pom.xml
+++ b/ambari-metrics/ambari-metrics-assembly/pom.xml
@@ -35,6 +35,7 @@
   <properties>
     <collector.dir>${project.basedir}/../ambari-metrics-timelineservice</collector.dir>
     <monitor.dir>${project.basedir}/../ambari-metrics-host-monitoring</monitor.dir>
+    <aggregator.dir>${project.basedir}/../ambari-metrics-host-aggregator</aggregator.dir>
     <grafana.dir>${project.basedir}/../ambari-metrics-grafana</grafana.dir>
     <hadoop-sink.dir>${project.basedir}/../ambari-metrics-hadoop-sink</hadoop-sink.dir>
     <storm-sink.dir>${project.basedir}/../ambari-metrics-storm-sink</storm-sink.dir>
@@ -599,6 +600,19 @@
                       </sources>
                     </mapping>
                     <mapping>
+                      <directory>/var/lib/ambari-metrics-monitor/lib</directory>
+                      <sources>
+                        <source>
+                          <location>
+                            ${aggregator.dir}/target/
+                          </location>
+                          <includes>
+                            <include>ambari-metrics-host-aggregator-${project.version}.jar</include>
+                          </includes>
+                        </source>
+                      </sources>
+                    </mapping>
+                    <mapping>
                       <directory>/etc/ambari-metrics-monitor/conf</directory>
                       <configuration>true</configuration>
                     </mapping>
@@ -744,6 +758,7 @@
                     <path>/var/run/ambari-metrics-grafana</path>
                     <path>/var/log/ambari-metrics-grafana</path>
                     <path>/var/lib/ambari-metrics-collector</path>
+                    <path>/var/lib/ambari-metrics-monitor/lib</path>
                     <path>/var/lib/ambari-metrics-grafana</path>
                     <path>/usr/lib/ambari-metrics-hadoop-sink</path>
                     <path>/usr/lib/ambari-metrics-kafka-sink</path>
@@ -1331,6 +1346,11 @@
       <type>pom</type>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-metrics-host-aggregator</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
 
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
index ab309a1..d015d31 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
@@ -64,6 +64,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/windows</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/windows</directory>
       <outputDirectory>/</outputDirectory>
       <includes>
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
index 99a41c3..448fe62 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
@@ -46,6 +46,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/unix</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/unix</directory>
       <outputDirectory>bin</outputDirectory>
       <includes>
@@ -68,4 +75,4 @@
 
 
 
-</assembly>
\ No newline at end of file
+</assembly>
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 46f32f9..a8dc571 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -78,7 +78,11 @@
   public static final String SSL_KEYSTORE_PATH_PROPERTY = "truststore.path";
   public static final String SSL_KEYSTORE_TYPE_PROPERTY = "truststore.type";
   public static final String SSL_KEYSTORE_PASSWORD_PROPERTY = "truststore.password";
+  public static final String HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = "host_in_memory_aggregation";
+  public static final String HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = "host_in_memory_aggregation_port";
   public static final String COLLECTOR_LIVE_NODES_PATH = "/ws/v1/timeline/metrics/livenodes";
+  public static final String INSTANCE_ID_PROPERTY = "instanceId";
+  public static final String SET_INSTANCE_ID_PROPERTY = "set.instanceId";
 
   protected static final AtomicInteger failedCollectorConnectionsCounter = new AtomicInteger(0);
   public static int NUMBER_OF_SKIPPED_COLLECTOR_EXCEPTIONS = 100;
@@ -239,8 +243,14 @@
   }
 
   protected boolean emitMetrics(TimelineMetrics metrics) {
-    String collectorHost = getCurrentCollectorHost();
-    String connectUrl = getCollectorUri(collectorHost);
+    String connectUrl;
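+    // With host in-memory aggregation enabled, post to the local aggregator application,
+    // which is assumed to expose the same timeline-metrics endpoint as the remote collector.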
+    if (isHostInMemoryAggregationEnabled()) {
+      connectUrl = constructTimelineMetricUri("http", "localhost", String.valueOf(getHostInMemoryAggregationPort()));
+    } else {
+      String collectorHost = getCurrentCollectorHost();
+      connectUrl = getCollectorUri(collectorHost);
+    }
+
     String jsonData = null;
     LOG.debug("EmitMetrics connectUrl = "  + connectUrl);
     try {
@@ -560,4 +570,16 @@
    * @return String "host1"
    */
   abstract protected String getHostname();
+
+  /**
+   * Check if host in-memory aggregation is enabled
+   * @return true if metrics should be posted to the local in-memory aggregator
+   */
+  abstract protected boolean isHostInMemoryAggregationEnabled();
+
+  /**
+   * In-memory aggregation port
+   * @return the port the local in-memory aggregation application listens on
+   */
+  abstract protected int getHostInMemoryAggregationPort();
 }
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
new file mode 100644
index 0000000..c903e3d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.Set;
+
+@XmlRootElement(name="AggregationResult")
+public class AggregationResult {
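+    // serialized form (illustrative): {"result":[{"timelineMetric":{...},"metricAggregate":{...}}],"timeInMilis":1501000000000}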
+    protected Set<TimelineMetricWithAggregatedValues> result;
+    protected Long timeInMilis;
+
+    @Override
+    public String toString() {
+        return "AggregationResult{" +
+                "result=" + result +
+                ", timeInMilis=" + timeInMilis +
+                '}';
+    }
+
+    public AggregationResult() {
+    }
+
+    public AggregationResult(Set<TimelineMetricWithAggregatedValues> result, Long timeInMilis) {
+        this.result = result;
+        this.timeInMilis = timeInMilis;
+    }
+    @XmlElement
+    public Set<TimelineMetricWithAggregatedValues> getResult() {
+        return result;
+    }
+
+    public void setResult(Set<TimelineMetricWithAggregatedValues> result) {
+        this.result = result;
+    }
+    @XmlElement
+    public Long getTimeInMilis() {
+        return timeInMilis;
+    }
+
+    public void setTimeInMilis(Long timeInMilis) {
+        this.timeInMilis = timeInMilis;
+    }
+}
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
new file mode 100644
index 0000000..84cba0e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.annotate.JsonSubTypes;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+/**
+ * Base representation of a metric aggregate: a running sum plus min, max and deviation.
+ */
+@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
+  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class MetricAggregate {
+  private static final ObjectMapper mapper = new ObjectMapper();
+
+  protected Double sum = 0.0;
+  protected Double deviation;
+  protected Double max = Double.MIN_VALUE;
+  protected Double min = Double.MAX_VALUE;
+
+  public MetricAggregate() {
+  }
+
+  MetricAggregate(Double sum, Double deviation, Double max,
+                  Double min) {
+    this.sum = sum;
+    this.deviation = deviation;
+    this.max = max;
+    this.min = min;
+  }
+
+  public void updateSum(Double sum) {
+    this.sum += sum;
+  }
+
+  public void updateMax(Double max) {
+    if (max > this.max) {
+      this.max = max;
+    }
+  }
+
+  public void updateMin(Double min) {
+    if (min < this.min) {
+      this.min = min;
+    }
+  }
+
+  @JsonProperty("sum")
+  public Double getSum() {
+    return sum;
+  }
+
+  @JsonProperty("deviation")
+  public Double getDeviation() {
+    return deviation;
+  }
+
+  @JsonProperty("max")
+  public Double getMax() {
+    return max;
+  }
+
+  @JsonProperty("min")
+  public Double getMin() {
+    return min;
+  }
+
+  public void setSum(Double sum) {
+    this.sum = sum;
+  }
+
+  public void setDeviation(Double deviation) {
+    this.deviation = deviation;
+  }
+
+  public void setMax(Double max) {
+    this.max = max;
+  }
+
+  public void setMin(Double min) {
+    this.min = min;
+  }
+
+  public String toJSON() throws IOException {
+    return mapper.writeValueAsString(this);
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
new file mode 100644
index 0000000..7ef2c1d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Cluster-level aggregate of a metric across the hosts reporting it.
+ */
+public class MetricClusterAggregate extends MetricAggregate {
+  private int numberOfHosts;
+
+  @JsonCreator
+  public MetricClusterAggregate() {
+  }
+
+  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
+                         Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  @JsonProperty("numberOfHosts")
+  public int getNumberOfHosts() {
+    return numberOfHosts;
+  }
+
+  public void updateNumberOfHosts(int count) {
+    this.numberOfHosts += count;
+  }
+
+  public void setNumberOfHosts(int numberOfHosts) {
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  /**
+   * Merge another aggregate into this one: update min, max, sum and host count.
+   */
+  public void updateAggregates(MetricClusterAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricAggregate{" +
+      "sum=" + sum +
+      ", numberOfHosts=" + numberOfHosts +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
new file mode 100644
index 0000000..e190913
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Represents a collection of minute based aggregation of values for
+ * resolution greater than a minute.
+ */
+public class MetricHostAggregate extends MetricAggregate {
+
+  private long numberOfSamples = 0;
+
+  @JsonCreator
+  public MetricHostAggregate() {
+    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
+  }
+
+  public MetricHostAggregate(Double sum, int numberOfSamples,
+                             Double deviation,
+                             Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  @JsonProperty("numberOfSamples")
+  public long getNumberOfSamples() {
+    return numberOfSamples == 0 ? 1 : numberOfSamples;
+  }
+
+  public void updateNumberOfSamples(long count) {
+    this.numberOfSamples += count;
+  }
+
+  public void setNumberOfSamples(long numberOfSamples) {
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  public double calculateAverage() {
+    // use the accessor, which guards against a zero sample count
+    return sum / getNumberOfSamples();
+  }
+
+  /**
+   * Merge another aggregate into this one: update min, max, sum and sample count.
+   */
+  public void updateAggregates(MetricHostAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricHostAggregate{" +
+      "sum=" + sum +
+      ", numberOfSamples=" + numberOfSamples +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
index 44c9d4a..edace52 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -45,7 +45,7 @@
   private String type;
   private String units;
   private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
-  private Map<String, String> metadata = new HashMap<>();
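+  // concrete HashMap rather than Map — presumably so the bean stays fully serializable
+  // for sinks that ship TimelineMetric instances over the wire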
+  private HashMap<String, String> metadata = new HashMap<>();
 
   // default
   public TimelineMetric() {
@@ -151,11 +151,11 @@
   }
 
   @XmlElement(name = "metadata")
-  public Map<String,String> getMetadata () {
+  public HashMap<String,String> getMetadata () {
     return metadata;
   }
 
-  public void setMetadata (Map<String,String> metadata) {
+  public void setMetadata (HashMap<String,String> metadata) {
     this.metadata = metadata;
   }
 
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
new file mode 100644
index 0000000..626ac5f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+
+@XmlRootElement(name = "TimelineMetricWithAggregatedValues")
+@XmlAccessorType(XmlAccessType.NONE)
+public class TimelineMetricWithAggregatedValues {
+    private TimelineMetric timelineMetric;
+    private MetricHostAggregate metricAggregate;
+
+    public TimelineMetricWithAggregatedValues() {
+    }
+
+    public TimelineMetricWithAggregatedValues(TimelineMetric metric, MetricHostAggregate metricAggregate) {
+        timelineMetric = metric;
+        this.metricAggregate = metricAggregate;
+    }
+
+    @XmlElement
+    public MetricHostAggregate getMetricAggregate() {
+        return metricAggregate;
+    }
+    @XmlElement
+    public TimelineMetric getTimelineMetric() {
+        return timelineMetric;
+    }
+
+    public void setTimelineMetric(TimelineMetric timelineMetric) {
+        this.timelineMetric = timelineMetric;
+    }
+
+    public void setMetricAggregate(MetricHostAggregate metricAggregate) {
+        this.metricAggregate = metricAggregate;
+    }
+
+    @Override
+    public String toString() {
+        return "TimelineMetricWithAggregatedValues{" +
+                "timelineMetric=" + timelineMetric +
+                ", metricAggregate=" + metricAggregate +
+                '}';
+    }
+}
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
index 9b0cdbe..ce2cf79 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
@@ -90,6 +90,16 @@
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
index a393a96..f0174d5 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
@@ -192,5 +192,15 @@
     protected String getHostname() {
       return "h1";
     }
+
+    @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
   }
 }
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
index 32fe32e..4eb75eb 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
@@ -125,6 +125,16 @@
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return false;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
index 3fdf3f4..6277907 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
@@ -61,6 +61,11 @@
   private final static String COUNTER_METRICS_PROPERTY = "counters";
   private final Set<String> counterMetrics = new HashSet<String>();
   private int timeoutSeconds = 10;
+  private boolean setInstanceId;
+  private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
+
 
   @Override
   public void start() {
@@ -106,6 +111,11 @@
     zookeeperQuorum = configuration.getProperty("zookeeper.quorum");
     protocol = configuration.getProperty(COLLECTOR_PROTOCOL, "http");
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
+    setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, "");
+
+    // Boolean.getBoolean() reads a JVM system property named by its argument, not the given value — parse the property instead
+    hostInMemoryAggregationEnabled = Boolean.parseBoolean(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+    // default of 61888 is assumed from the sink tests; adjust if the aggregator listens elsewhere
+    hostInMemoryAggregationPort = Integer.parseInt(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
     // Initialize the collector write strategy
     super.init();
 
@@ -158,6 +168,16 @@
     return hostname;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setPollFrequency(long pollFrequency) {
     this.pollFrequency = pollFrequency;
   }
@@ -227,7 +247,11 @@
       TimelineMetric timelineMetric = new TimelineMetric();
       timelineMetric.setMetricName(attributeName);
       timelineMetric.setHostName(hostname);
-      timelineMetric.setInstanceId(component);
+      if (setInstanceId) {
+        timelineMetric.setInstanceId(instanceId + component);
+      } else {
+        timelineMetric.setInstanceId(component);
+      }
       timelineMetric.setAppId("FLUME_HANDLER");
       timelineMetric.setStartTime(currentTimeMillis);
       timelineMetric.getMetricValues().put(currentTimeMillis, Double.parseDouble(attributeValue));
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
index 97de6e7..72e64e2 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
@@ -84,7 +84,6 @@
          * AMS Datasource  Query
          */
         AmbariMetricsDatasource.prototype.query = function (options) {
-
           var emptyData = function (metric) {
             var legend = metric.alias ? metric.alias : metric.metric;
             return {
@@ -221,12 +220,16 @@
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
             return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.metric + metricTransform +
-                metricAggregator + "&hostname=" + target.hosts + '&appId=' + target.app + '&startTime=' + from +
-                '&endTime=' + to + precision + seriesAggregator }).then(
-                getMetricsData(target)
+            metricAggregator + "&hostname=" + target.hosts + '&appId=' + target.app + '&instanceId=' + target.cluster + '&startTime=' + from +
+            '&endTime=' + to + precision + seriesAggregator }).then(
+              getMetricsData(target)
             );
           };
           //Check if it's a templated dashboard.
+          var templatedClusters = templateSrv.variables.filter(function(o) { return o.name === "cluster"});
+          var templatedCluster = (_.isEmpty(templatedClusters)) ? '' : templatedClusters[0].options.filter(function(cluster)
+            { return cluster.selected; }).map(function(clusterName) { return clusterName.value; });
+
           var templatedHosts = templateSrv.variables.filter(function(o) { return o.name === "hosts"});
           var templatedHost = (_.isEmpty(templatedHosts)) ? '' : templatedHosts[0].options.filter(function(host)
             { return host.selected; }).map(function(hostName) { return hostName.value; });
@@ -236,14 +239,15 @@
           var tComponent = _.isEmpty(tComponents) ? '' : tComponents[0].current.value;
 
           var getServiceAppIdData = function(target) {
+            var tCluster = (_.isEmpty(templateSrv.variables)) ? templatedCluster : (target.templatedCluster || '');
             var tHost = (_.isEmpty(templateSrv.variables)) ? templatedHost : target.templatedHost;
-            var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision=' 
+            var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision='
             + target.precision;
             var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
             return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.metric + metricTransform
-              + metricAggregator + '&hostname=' + tHost + '&appId=' + target.app + '&startTime=' + from +
+              + metricAggregator + '&hostname=' + tHost + '&appId=' + target.app + '&instanceId=' + tCluster + '&startTime=' + from +
               '&endTime=' + to + precision + seriesAggregator }).then(
               getMetricsData(target)
             );
@@ -265,8 +269,8 @@
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
             var templatedComponent = (_.isEmpty(tComponent)) ? target.app : tComponent;
             return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.metric + metricTransform
-              + metricAggregator + '&hostname=' + target.templatedHost + '&appId=' + templatedComponent + '&startTime=' + from +
-              '&endTime=' + to + precision + topN + seriesAggregator }).then(
+              + metricAggregator + '&hostname=' + target.templatedHost + '&appId=' + templatedComponent + '&instanceId=' + target.templatedCluster
+              + '&startTime=' + from + '&endTime=' + to + precision + topN + seriesAggregator }).then(
               allHostMetricsData(target)
             );
           };
@@ -277,7 +281,7 @@
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
             return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.queue + metricTransform
-              + metricAggregator + '&appId=resourcemanager&startTime=' + from +
+              + metricAggregator + '&appId=resourcemanager&instanceId=' + target.templatedCluster + '&startTime=' + from +
               '&endTime=' + to + precision + seriesAggregator }).then(
               getMetricsData(target)
             );
@@ -286,7 +290,7 @@
             var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision='
             + target.precision;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.hbMetric + '&appId=hbase&startTime='
+            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.hbMetric + '&instanceId=' + target.templatedCluster + '&appId=hbase&startTime='
             + from + '&endTime=' + to + precision + seriesAggregator }).then(
               allHostMetricsData(target)
             );
@@ -298,7 +302,7 @@
             var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.kbMetric + metricTransform
+            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.kbMetric + metricTransform + '&instanceId=' + target.templatedCluster
               + metricAggregator + '&appId=kafka_broker&startTime=' + from +
               '&endTime=' + to + precision + seriesAggregator }).then(
               getMetricsData(target)
@@ -310,7 +314,7 @@
             var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.nnMetric + metricTransform
+            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.nnMetric + metricTransform + '&instanceId=' + target.templatedCluster
             + metricAggregator + '&appId=namenode&startTime=' + from + '&endTime=' + to + precision + seriesAggregator }).then(
               allHostMetricsData(target)
             );
@@ -323,7 +327,7 @@
             var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sTopoMetric + metricTransform
+            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sTopoMetric + metricTransform + '&instanceId=' + target.templatedCluster
                 + metricAggregator + '&appId=nimbus&startTime=' + from + '&endTime=' + to + precision + seriesAggregator }).then(
                 allHostMetricsData(target)
             );
@@ -336,7 +340,7 @@
             var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
             var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
             var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sDataSourceMetric + metricTransform
+            return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sDataSourceMetric + metricTransform + '&instanceId=' + target.templatedCluster
                           + metricAggregator + '&appId=druid&startTime=' + from + '&endTime=' + to + precision + seriesAggregator }).then(
                           allHostMetricsData(target)
             );
@@ -471,7 +475,6 @@
                 target.sTopology = selectedTopology;
                 target.sComponent = selectedComponent;
                 target.sTopoMetric = target.metric.replace('*', target.sTopology).replace('*', target.sComponent);
-                debugger;
                   return getStormData(target);
               }));
             }
@@ -507,14 +510,14 @@
                 }));
               });
             }
-
             // To speed up querying on templatized dashboards.
-            if (templateSrv.variables[1] && templateSrv.variables[1].name === "hosts") {
+            if (templateSrv.variables[2] && templateSrv.variables[2].name === "hosts") {
               var allHosts = templateSrv._values.hosts.lastIndexOf('}') > 0 ? templateSrv._values.hosts.slice(1,-1) :
               templateSrv._values.hosts;
               allHosts = templateSrv._texts.hosts === "All" ? '%' : allHosts;
               metricsPromises.push(_.map(options.targets, function(target) {
-                  target.templatedHost = allHosts;
+                  target.templatedHost = allHosts ? allHosts : '';
+                  target.templatedCluster = templatedCluster;
                   return getAllHostData(target);
               }));
             }
@@ -558,14 +561,19 @@
         AmbariMetricsDatasource.prototype.metricFindQuery = function (query) {
           var interpolated;
           try {
-            interpolated = templateSrv.replace(query);
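+            // template queries are expected to be of the form "hosts.<...>" or "cluster.<...>";
+            // only the prefix selects the suggestion source (see the dispatch at the end of this function)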
+            interpolated = query.split('.')[0];
           } catch (err) {
             return $q.reject(err);
           }
+          var templatedClusters = templateSrv.variables.filter(function(o) { return o.name === "cluster"});
+          var templatedCluster = (_.isEmpty(templatedClusters)) ? '' : templatedClusters[0].options.filter(function(cluster)
+          { return cluster.selected; }).map(function(clusterName) { return clusterName.value; });
+
           var tComponents = _.isEmpty(templateSrv.variables) ? '' : templateSrv.variables.filter(function(variable) 
             { return variable.name === "components"});
           var tComponent = _.isEmpty(tComponents) ? '' : tComponents[0].current.value;
 
+
           // Templated Variable for HBase Users
           // It will search the cluster and populate the HBase Users.
           if(interpolated === "hbase-users") {
@@ -837,58 +845,11 @@
               });
           }
 
-          // Templated Variable that will populate all hosts on the cluster.
-          // The variable needs to be set to "hosts".
-          if (!tComponent){
-                  return this.doAmbariRequest({
-                        method: 'GET',
-                        url: '/ws/v1/timeline/metrics/' + interpolated
-                      })
-                      .then(function (results) {
-                        //Remove fakehostname from the list of hosts on the cluster.
-                        var fake = "fakehostname"; delete results.data[fake];
-                        return _.map(_.keys(results.data), function (hostName) {
-                          return {
-                                text: hostName,
-                                expandable: hostName.expandable ? true : false
-                              };
-                        });
-                      });
-          } else {
-            // Create a dropdown in templated dashboards for single components.
-            // This will check for the component set and show hosts only for the
-            // selected component.
-            return this.doAmbariRequest({
-                method: 'GET',
-                url: '/ws/v1/timeline/metrics/hosts'
-              })
-              .then(function(results) {
-                var compToHostMap = {};
-                //Remove fakehostname from the list of hosts on the cluster.
-                var fake = "fakehostname";
-                delete results.data[fake];
-                //Query hosts based on component name
-                _.forEach(results.data, function(comp, hostName) {
-                  comp.forEach(function(component) {
-                    if (!compToHostMap[component]) {
-                      compToHostMap[component] = [];
-                    }
-                    compToHostMap[component].push(hostName);
-                  });
-                });
-                var compHosts = compToHostMap[tComponent];
-                compHosts = _.map(compHosts, function(host) {
-                  return {
-                    text: host,
-                    expandable: host.expandable ? true : false
-                  };
-                });
-                compHosts = _.sortBy(compHosts, function(i) {
-                  return i.text.toLowerCase();
-                });
-                return $q.when(compHosts);
-              });
-           }
+          if (interpolated === 'hosts') {
+            return this.suggestHosts(tComponent, templatedCluster);
+          } else if (interpolated === 'cluster') {
+            return this.suggestClusters(tComponent);
+          }
         };
 
         /**
@@ -941,34 +902,47 @@
           return $q.when(keys);
         };
 
+        AmbariMetricsDatasource.prototype.suggestClusters = function(app) {
+          if (!app) { app = ''; }
+          return this.doAmbariRequest({
+            method: 'GET',
+            url: '/ws/v1/timeline/metrics/instances?' + 'appId=' + app
+          }).then(function(response) {
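+              // assumed response shape: { "<instanceId>": { "<appId>": [ hosts... ], ... }, ... }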
+              var clusters = [];
+              var data = response.data;
+              for (var cluster in data) {
+                if (data[cluster].hasOwnProperty(app)) {
+                  clusters.push({text: cluster});
+                }
+              }
+              return $q.when(clusters);
+          });
+        };
+
         /**
          * AMS Datasource - Suggest Hosts.
          *
          * Query Hosts on the cluster.
          */
-        AmbariMetricsDatasource.prototype.suggestHosts = function (query, app) {
-          console.log(query);
-          return this.doAmbariRequest({method: 'GET', url: '/ws/v1/timeline/metrics/hosts'})
-            .then(function (results) {
-              var compToHostMap = {};
-              //Remove fakehostname from the list of hosts on the cluster.
-              var fake = "fakehostname"; delete results.data[fake];
-              //Query hosts based on component name
-              _.forEach(results.data, function (comp, hostName) {
-                comp.forEach(function (component) {
-                  if (!compToHostMap[component]){
-                    compToHostMap[component] = [];
-                  }
-                  compToHostMap[component].push(hostName);
-                });
-              });
-              var compHosts = compToHostMap[app];
-              compHosts = _.map(compHosts, function (h) {
-                return {text: h};
-              });
-              compHosts = _.sortBy(compHosts, function (i) { return i.text.toLowerCase(); });
-              return $q.when(compHosts);
-            });
+        AmbariMetricsDatasource.prototype.suggestHosts = function (app, cluster) {
+          if (!app) { app = ''; }
+          if (!cluster) { cluster = ''; }
+          return this.doAmbariRequest({
+            method: 'GET',
+            url: '/ws/v1/timeline/metrics/instances?' + 'appId=' + app + '&instanceId=' + cluster
+          }).then(function (response) {
+            var hosts = [];
+            var data = response.data;
+            for (var cluster in data) {
+              var appHosts = data[cluster][app];
+              if (appHosts) {
+                for (var index in appHosts) {
+                  hosts.push({text: appHosts[index]});
+                }
+              }
+            }
+            return $q.when(hosts);
+          });
         };
 
         /**
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
index 3f322c1..7e78cc0 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
@@ -81,6 +81,18 @@
             </a>
         </li>
 
+        <li class="tight-form-item" style="width: 86px" ng-hide="dashboard.templating.list.length > 0">
+            Cluster
+        </li>
+        <li ng-hide="dashboard.templating.list.length > 0">
+            <input type="text" class="input-large tight-form-input" ng-model="target.cluster"
+                   spellcheck='false' bs-typeahead="suggestClusters" placeholder="cluster name" data-min-length=0 data-items=100
+                   ng-blur="targetBlur()">
+            </input>
+            <a bs-tooltip="target.errors.metric" style="color: rgb(229, 189, 28)" ng-show="target.errors.metric">
+                <i class="fa fa-warning"></i>
+            </a>
+        </li>
 
         <li class="tight-form-item" style="width: 86px" ng-hide="dashboard.templating.list.length > 0">
             Hosts
@@ -95,8 +107,6 @@
             </a>
         </li>
 
-
-
         <li class="tight-form-item">
             Aggregator
         </li>
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
index a26e7d0..02b5813 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
@@ -55,6 +55,7 @@
             if (newValue === '') {
               $scope.target.metric = '';
               $scope.target.hosts = '';
+              $scope.target.cluster = '';
             }
           });
           if (!$scope.target.downsampleAggregator) {
@@ -86,8 +87,14 @@
             .then(callback);
         };
 
+        $scope.suggestClusters = function(query, callback) {
+          $scope.datasource.suggestClusters($scope.target.app)
+            .then($scope.getTextValues)
+            .then(callback);
+        };
+
         $scope.suggestHosts = function(query, callback) {
-          $scope.datasource.suggestHosts(query, $scope.target.app)
+          $scope.datasource.suggestHosts($scope.target.app, $scope.target.cluster)
             .then($scope.getTextValues)
             .then(callback);
         };
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index a112ef2..a290ced 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -51,6 +51,8 @@
   private Map<String, Set<String>> useTagsMap = new HashMap<String, Set<String>>();
   private TimelineMetricsCache metricsCache;
   private String hostName = "UNKNOWN.example.com";
+  private String instanceId = null;
+  private boolean setInstanceId;
   private String serviceName = "";
   private Collection<String> collectorHosts;
   private String collectorUri;
@@ -73,6 +75,8 @@
       return t;
     }
   });
+  private int hostInMemoryAggregationPort;
+  private boolean hostInMemoryAggregationEnabled;
 
   @Override
   public void init(SubsetConfiguration conf) {
@@ -94,6 +98,8 @@
     }
 
     serviceName = getServiceName(conf);
+    instanceId = conf.getString(INSTANCE_ID_PROPERTY, null);
+    setInstanceId = conf.getBoolean(SET_INSTANCE_ID_PROPERTY, false);
 
     LOG.info("Identified hostname = " + hostName + ", serviceName = " + serviceName);
     // Initialize the collector write strategy
@@ -103,7 +109,8 @@
     protocol = conf.getString(COLLECTOR_PROTOCOL, "http");
     collectorHosts = parseHostsStringArrayIntoCollection(conf.getStringArray(COLLECTOR_HOSTS_PROPERTY));
     port = conf.getString(COLLECTOR_PORT, "6188");
-
+    hostInMemoryAggregationEnabled = conf.getBoolean(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+    hostInMemoryAggregationPort = conf.getInt(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
     if (collectorHosts.isEmpty()) {
       LOG.error("No Metric collector configured.");
     } else {
@@ -245,6 +252,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void putMetrics(MetricsRecord record) {
     try {
       String recordName = record.name();
@@ -304,9 +321,10 @@
 
       int sbBaseLen = sb.length();
       List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
-      Map<String, String> metadata = null;
+      HashMap<String, String> metadata = null;
       if (skipAggregation) {
-        metadata = Collections.singletonMap("skipAggregation", "true");
+        metadata = new HashMap<>();
+        metadata.put("skipAggregation", "true");
       }
       long startTime = record.timestamp();
 
@@ -318,6 +336,9 @@
         timelineMetric.setMetricName(name);
         timelineMetric.setHostName(hostName);
         timelineMetric.setAppId(serviceName);
+        if (setInstanceId) {
+          timelineMetric.setInstanceId(instanceId);
+        }
         timelineMetric.setStartTime(startTime);
         timelineMetric.setType(metric.type() != null ? metric.type().name() : null);
         timelineMetric.getMetricValues().put(startTime, value.doubleValue());
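
The sink stamps instanceId onto each TimelineMetric only when SET_INSTANCE_ID_PROPERTY is true. A minimal sketch of that wiring, assuming the TimelineMetric bean from ambari-metrics-common used above; the metric name, host, and values are illustrative:

    import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;

    public class InstanceIdStampSketch {
        public static void main(String[] args) {
            boolean setInstanceId = true;   // would come from SET_INSTANCE_ID_PROPERTY
            String instanceId = "cluster1"; // would come from INSTANCE_ID_PROPERTY
            long now = System.currentTimeMillis();

            TimelineMetric metric = new TimelineMetric();
            metric.setMetricName("dfs.datanode.BytesWritten"); // illustrative name
            metric.setHostName("node1.example.com");
            metric.setAppId("datanode");
            metric.setStartTime(now);
            if (setInstanceId) {
                metric.setInstanceId(instanceId); // only stamped when the flag is on
            }
            metric.getMetricValues().put(now, 42.0d);
        }
    }
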
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index 5777639..30c5c23 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -50,12 +50,15 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
+import java.util.TreeMap;
 
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_PORT;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_HOSTS_PROPERTY;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_PROTOCOL;
+import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.INSTANCE_ID_PROPERTY;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.MAX_METRIC_ROW_CACHE_SIZE;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.METRICS_SEND_INTERVAL;
+import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.SET_INSTANCE_ID_PROPERTY;
 import static org.easymock.EasyMock.anyInt;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
@@ -81,13 +84,17 @@
   }
 
   @Test
-  @PrepareForTest({URL.class, OutputStream.class, AbstractTimelineMetricsSink.class, HttpURLConnection.class})
+  @PrepareForTest({URL.class, OutputStream.class, AbstractTimelineMetricsSink.class, HttpURLConnection.class, TimelineMetric.class, HadoopTimelineMetricsSink.class})
   public void testPutMetrics() throws Exception {
     HadoopTimelineMetricsSink sink = new HadoopTimelineMetricsSink();
 
     HttpURLConnection connection = PowerMock.createNiceMock(HttpURLConnection.class);
     URL url = PowerMock.createNiceMock(URL.class);
     InputStream is = IOUtils.toInputStream(gson.toJson(Collections.singletonList("localhost")));
+    TimelineMetric timelineMetric = PowerMock.createNiceMock(TimelineMetric.class);
+    expectNew(TimelineMetric.class).andReturn(timelineMetric).times(2);
+    expect(timelineMetric.getMetricValues()).andReturn(new TreeMap<Long, Double>()).anyTimes();
+    expect(timelineMetric.getMetricName()).andReturn("metricName").anyTimes();
     expectNew(URL.class, anyString()).andReturn(url).anyTimes();
     expect(url.openConnection()).andReturn(connection).anyTimes();
     expect(connection.getInputStream()).andReturn(is).anyTimes();
@@ -106,6 +113,8 @@
 
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();
     expect(conf.getInt(eq(METRICS_SEND_INTERVAL), anyInt())).andReturn(1000).anyTimes();
+    expect(conf.getBoolean(eq(SET_INSTANCE_ID_PROPERTY), eq(false))).andReturn(true).anyTimes();
+    expect(conf.getString(eq(INSTANCE_ID_PROPERTY), anyString())).andReturn("instanceId").anyTimes();
 
     conf.setListDelimiter(eq(','));
     expectLastCall().anyTimes();
@@ -145,6 +154,9 @@
 
     expect(record.metrics()).andReturn(Arrays.asList(metric)).anyTimes();
 
+    timelineMetric.setInstanceId(eq("instanceId"));
+    EasyMock.expectLastCall();
+
     replay(conf, record, metric);
     replayAll();
 
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
new file mode 100644
index 0000000..d7ceedd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=/var/log/ambari-metrics-monitor/ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n
+
+
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
new file mode 100644
index 0000000..d9aabab
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=\\var\\log\\ambari-metrics-monitor\\ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
new file mode 100644
index 0000000..0598bef
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>ambari-metrics</artifactId>
+        <groupId>org.apache.ambari</groupId>
+        <version>2.0.0.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>ambari-metrics-host-aggregator</artifactId>
+    <packaging>jar</packaging>
+
+    <name>Ambari Metrics Host Aggregator</name>
+    <url>http://maven.apache.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>3.8.1</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>14.0.1</version>
+        </dependency>
+        <dependency>
+              <groupId>org.apache.ambari</groupId>
+              <artifactId>ambari-metrics-common</artifactId>
+              <version>2.0.0.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.servlet</groupId>
+            <artifactId>servlet-api</artifactId>
+            <version>2.5</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-json</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-server</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.xml.bind</groupId>
+            <artifactId>jaxb-api</artifactId>
+            <version>2.2.2</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-core</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>2.7.1.2.3.4.0-3347</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>1.6</version>
+                <configuration>
+                    <createDependencyReducedPom>false</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
new file mode 100644
index 0000000..b1f60fa
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Map;
+
+/**
+ * Abstract class that runs a thread which publishes metrics data to the AMS collector at a specified interval.
+ */
+public abstract class AbstractMetricPublisherThread extends Thread {
+    protected int publishIntervalInSeconds;
+    protected String publishURL;
+    protected ObjectMapper objectMapper;
+    private Log LOG;
+    protected TimelineMetricsHolder timelineMetricsHolder;
+
+    public AbstractMetricPublisherThread(TimelineMetricsHolder timelineMetricsHolder, String publishURL, int publishIntervalInSeconds) {
+        LOG = LogFactory.getLog(this.getClass());
+        this.publishURL = publishURL;
+        this.publishIntervalInSeconds = publishIntervalInSeconds;
+        this.timelineMetricsHolder = timelineMetricsHolder;
+        objectMapper = new ObjectMapper();
+        AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+        objectMapper.setAnnotationIntrospector(introspector);
+        objectMapper.setSerializationInclusion(
+                JsonSerialize.Inclusion.NON_NULL);
+    }
+
+    /**
+     * Publishes metrics to the collector at the configured interval until the thread is interrupted.
+     */
+    @Override
+    public void run() {
+        while (!isInterrupted()) {
+            try {
+                sleep(this.publishIntervalInSeconds * 1000);
+            } catch (InterruptedException e) {
+                //Ignore
+            }
+            try {
+                processAndPublishMetrics(getMetricsFromCache());
+            } catch (Exception e) {
+                LOG.error("Couldn't process and send metrics : ",e);
+            }
+        }
+    }
+
+    /**
+     * Processes and sends metrics to the collector.
+     * @param metricsFromCache
+     * @throws Exception
+     */
+    protected void processAndPublishMetrics(Map<Long, TimelineMetrics> metricsFromCache) throws Exception {
+        if (metricsFromCache.size() == 0) return;
+
+        LOG.info(String.format("Preparing %s timeline metrics for publishing", metricsFromCache.size()));
+        publishMetricsJson(processMetrics(metricsFromCache));
+    }
+
+    /**
+     * Returns the metrics map; its source depends on the implementation.
+     * @return
+     */
+    protected abstract Map<Long,TimelineMetrics> getMetricsFromCache();
+
+    /**
+     * Processes the given metrics (aggregates or merges them) and converts them into the JSON string that will be sent to the collector.
+     * @param metricValues
+     * @return
+     */
+    protected abstract String processMetrics(Map<Long, TimelineMetrics> metricValues);
+
+    protected void publishMetricsJson(String jsonData) throws Exception {
+        int timeout = 5 * 1000;
+        HttpURLConnection connection = null;
+        if (this.publishURL == null) {
+            throw new IOException("Unknown URL. Unable to connect to metrics collector.");
+        }
+        LOG.info("Collector URL : " + publishURL);
+        connection = (HttpURLConnection) new URL(this.publishURL).openConnection();
+
+        connection.setRequestMethod("POST");
+        connection.setRequestProperty("Content-Type", "application/json");
+        connection.setRequestProperty("Connection", "Keep-Alive");
+        connection.setConnectTimeout(timeout);
+        connection.setReadTimeout(timeout);
+        connection.setDoOutput(true);
+
+        if (jsonData != null) {
+            try (OutputStream os = connection.getOutputStream()) {
+                os.write(jsonData.getBytes("UTF-8"));
+            }
+        }
+        int responseCode = connection.getResponseCode();
+        if (responseCode != 200) {
+            throw new Exception("responseCode is " + responseCode);
+        }
+        LOG.info("Successfully sent metrics.");
+    }
+
+    /**
+     * Interrupts the thread.
+     */
+    protected void stopPublisher() {
+        this.interrupt();
+    }
+}
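
AbstractMetricPublisherThread leaves two hooks to its subclasses: where to take metrics from, and how to serialize them. A hypothetical no-op subclass showing the contract; the class name is an illustration, not part of this patch:

    package org.apache.hadoop.metrics2.host.aggregator;

    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;

    // Hypothetical publisher that never has anything to send; it only
    // illustrates the two abstract hooks the real publishers implement.
    public class NoOpMetricsPublisher extends AbstractMetricPublisherThread {
        public NoOpMetricsPublisher(TimelineMetricsHolder holder, String url, int intervalSeconds) {
            super(holder, url, intervalSeconds);
        }

        @Override
        protected Map<Long, TimelineMetrics> getMetricsFromCache() {
            return Collections.emptyMap(); // real publishers drain TimelineMetricsHolder here
        }

        @Override
        protected String processMetrics(Map<Long, TimelineMetrics> metricValues) {
            return "[]"; // real publishers serialize with the shared objectMapper
        }
    }
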
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
new file mode 100644
index 0000000..0540ec9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Thread that aggregates and publishes metrics to the collector at a specified interval.
+ */
+public class AggregatedMetricsPublisher extends AbstractMetricPublisherThread {
+
+    private Log LOG;
+
+    public AggregatedMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+    /**
+     * Gets the metrics map from the TimelineMetricsHolder.
+     * @return
+     */
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForAggregationPublishing();
+    }
+
+    /**
+     * Aggregates the given metrics and converts them into the JSON string that will be sent to the collector.
+     * @param metricForAggregationValues
+     * @return
+     */
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricForAggregationValues) {
+        HashMap<String, TimelineMetrics> nameToMetricMap = new HashMap<>();
+        for (TimelineMetrics timelineMetrics : metricForAggregationValues.values()) {
+            for (TimelineMetric timelineMetric : timelineMetrics.getMetrics()) {
+                if (!nameToMetricMap.containsKey(timelineMetric.getMetricName())) {
+                    nameToMetricMap.put(timelineMetric.getMetricName(), new TimelineMetrics());
+                }
+                nameToMetricMap.get(timelineMetric.getMetricName()).addOrMergeTimelineMetric(timelineMetric);
+            }
+        }
+        Set<TimelineMetricWithAggregatedValues> metricAggregateMap = new HashSet<>();
+        for (TimelineMetrics metrics : nameToMetricMap.values()) {
+            double sum = 0;
+            double max = Integer.MIN_VALUE;
+            double min = Integer.MAX_VALUE;
+            int count = 0;
+            for (TimelineMetric metric : metrics.getMetrics()) {
+                for (Double value : metric.getMetricValues().values()) {
+                    sum+=value;
+                    max = Math.max(max, value);
+                    min = Math.min(min, value);
+                    count++;
+                }
+            }
+            TimelineMetric tmpMetric = new TimelineMetric(metrics.getMetrics().get(0));
+            tmpMetric.setMetricValues(new TreeMap<Long, Double>());
+            metricAggregateMap.add(new TimelineMetricWithAggregatedValues(tmpMetric, new MetricHostAggregate(sum, count, 0d, max, min)));
+        }
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(new AggregationResult(metricAggregateMap, System.currentTimeMillis()));
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+
+        return json;
+    }
+}
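
The per-metric-name aggregate is a plain fold over every datapoint: sum, max, min, and count. A self-contained sketch of that fold, with illustrative values:

    import java.util.Arrays;
    import java.util.List;

    public class HostAggregateFoldSketch {
        public static void main(String[] args) {
            List<Double> datapoints = Arrays.asList(1.0, 4.0, 2.5); // illustrative values
            double sum = 0;
            double max = Double.NEGATIVE_INFINITY;
            double min = Double.POSITIVE_INFINITY;
            int count = 0;
            for (double value : datapoints) {
                sum += value;
                max = Math.max(max, value);
                min = Math.min(min, value);
                count++;
            }
            // Mirrors new MetricHostAggregate(sum, count, 0d, max, min) above.
            System.out.printf("sum=%.1f count=%d max=%.1f min=%.1f%n", sum, count, max, min);
        }
    }
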
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
new file mode 100644
index 0000000..c6b703b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.sun.jersey.api.container.httpserver.HttpServerFactory;
+import com.sun.jersey.api.core.PackagesResourceConfig;
+import com.sun.jersey.api.core.ResourceConfig;
+import com.sun.net.httpserver.HttpServer;
+
+import javax.ws.rs.core.UriBuilder;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Web application with two publisher threads that process received metrics and submit the results to the collector.
+ */
+public class AggregatorApplication
+{
+    private static final int STOP_SECONDS_DELAY = 0;
+    private static final int JOIN_SECONDS_TIMEOUT = 2;
+    private static final String BASE_POST_URL = "%s://%s:%s/ws/v1/timeline/metrics";
+    private static final String AGGREGATED_POST_PREFIX = "/aggregated";
+    private static final String METRICS_SITE_CONFIGURATION_FILE = "ams-site.xml";
+    private static final Log LOG = LogFactory.getLog(AggregatorApplication.class);
+    private final int webApplicationPort;
+    private final int rawPublishingInterval;
+    private final int aggregationInterval;
+    private Configuration configuration;
+    private String [] collectorHosts;
+    private AggregatedMetricsPublisher aggregatePublisher;
+    private RawMetricsPublisher rawPublisher;
+    private TimelineMetricsHolder timelineMetricsHolder;
+    private HttpServer httpServer;
+
+    public AggregatorApplication(String collectorHosts) {
+        initConfiguration();
+        this.collectorHosts = collectorHosts.split(",");
+        this.aggregationInterval = configuration.getInt("timeline.metrics.host.aggregator.minute.interval", 300);
+        this.rawPublishingInterval = configuration.getInt("timeline.metrics.sink.report.interval", 60);
+        this.webApplicationPort = configuration.getInt("timeline.metrics.host.inmemory.aggregation.port", 61888);
+        this.timelineMetricsHolder = TimelineMetricsHolder.getInstance(rawPublishingInterval, aggregationInterval);
+        try {
+            this.httpServer = createHttpServer();
+        } catch (IOException e) {
+            LOG.error("Exception while starting HTTP server. Exiting", e);
+            System.exit(1);
+        }
+    }
+
+    private void initConfiguration() {
+        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+        if (classLoader == null) {
+            classLoader = getClass().getClassLoader();
+        }
+
+        URL amsResUrl = classLoader.getResource(METRICS_SITE_CONFIGURATION_FILE);
+        if (amsResUrl == null) {
+            throw new IllegalStateException("Unable to initialize the metrics " +
+                    "subsystem. No ams-site present in the classpath.");
+        }
+        LOG.info("Found metric service configuration: " + amsResUrl);
+        configuration = new Configuration(true);
+        try {
+            configuration.addResource(amsResUrl.toURI().toURL());
+        } catch (Exception e) {
+            LOG.error("Couldn't init configuration. ", e);
+            System.exit(1);
+        }
+    }
+
+    private String getHostName() {
+        String hostName = "localhost";
+        try {
+            hostName = InetAddress.getLocalHost().getCanonicalHostName();
+        } catch (UnknownHostException e) {
+            LOG.error(e);
+        }
+        return hostName;
+    }
+
+    private URI getURI() {
+        URI uri = UriBuilder.fromUri("http://" + getHostName() + "/").port(this.webApplicationPort).build();
+        LOG.info(String.format("Web server at %s", uri));
+        return uri;
+    }
+
+    private HttpServer createHttpServer() throws IOException {
+        ResourceConfig resourceConfig = new PackagesResourceConfig("org.apache.hadoop.metrics2.host.aggregator");
+        HashMap<String, Object> params = new HashMap();
+        params.put("com.sun.jersey.api.json.POJOMappingFeature", "true");
+        resourceConfig.setPropertiesAndFeatures(params);
+        return HttpServerFactory.create(getURI(), resourceConfig);
+    }
+
+    private void startWebServer() {
+        LOG.info("Starting web server.");
+        this.httpServer.start();
+    }
+
+    private void startAggregatePublisherThread() {
+        LOG.info("Starting aggregated metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]) + AGGREGATED_POST_PREFIX;
+        aggregatePublisher = new AggregatedMetricsPublisher(timelineMetricsHolder, collectorURL, aggregationInterval);
+        aggregatePublisher.start();
+    }
+
+    private void startRawPublisherThread() {
+        LOG.info("Starting raw metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]);
+        rawPublisher = new RawMetricsPublisher(timelineMetricsHolder, collectorURL, rawPublishingInterval);
+        rawPublisher.start();
+    }
+
+
+
+    private void stop() {
+        aggregatePublisher.stopPublisher();
+        rawPublisher.stopPublisher();
+        httpServer.stop(STOP_SECONDS_DELAY);
+        LOG.info("Stopped web server.");
+        try {
+            LOG.info("Waiting for threads to join.");
+            aggregatePublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            rawPublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            LOG.info("Gracefully stopped Aggregator Application.");
+        } catch (InterruptedException e) {
+            LOG.error("Received exception during stop : ", e);
+
+        }
+
+    }
+
+    private String buildBasicCollectorURL(String host) {
+        String port = configuration.get("timeline.metrics.service.webapp.address", "0.0.0.0:6188").split(":")[1];
+        String protocol = configuration.get("timeline.metrics.service.http.policy", "HTTP_ONLY").equalsIgnoreCase("HTTP_ONLY") ? "http" : "https";
+        return String.format(BASE_POST_URL, protocol, host, port);
+    }
+
+    public static void main( String[] args ) throws Exception {
+        LOG.info("Starting aggregator application");
+        if (args.length != 1) {
+            throw new Exception("This jar should be run with 1 argument - collector hosts separated with coma");
+        }
+
+        final AggregatorApplication app = new AggregatorApplication(args[0]);
+        app.startAggregatePublisherThread();
+        app.startRawPublisherThread();
+        app.startWebServer();
+
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                LOG.info("Stopping aggregator application");
+                app.stop();
+            }
+        });
+    }
+}
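
buildBasicCollectorURL derives the scheme from timeline.metrics.service.http.policy and the port from timeline.metrics.service.webapp.address. A sketch of the two URLs the publishers end up targeting; the collector host name is an assumption:

    public class CollectorUrlSketch {
        public static void main(String[] args) {
            String base = "%s://%s:%s/ws/v1/timeline/metrics";
            // Defaults from the code above: HTTP_ONLY policy, webapp address 0.0.0.0:6188.
            String rawUrl = String.format(base, "http", "collector1.example.com", "6188");
            String aggregatedUrl = rawUrl + "/aggregated";
            System.out.println(rawUrl);        // RawMetricsPublisher target
            System.out.println(aggregatedUrl); // AggregatedMetricsPublisher target
        }
    }
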
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
new file mode 100644
index 0000000..f96d0ed
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import com.sun.jersey.spi.resource.Singleton;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+
+@Singleton
+@Path("/ws/v1/timeline")
+public class AggregatorWebService {
+    TimelineMetricsHolder metricsHolder = TimelineMetricsHolder.getInstance();
+
+    @GET
+    @Produces("text/json")
+    @Path("/metrics")
+    public Response healthCheck() throws IOException {
+        return Response.ok().build();
+    }
+
+    @POST
+    @Produces(MediaType.TEXT_PLAIN)
+    @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+    @Path("/metrics")
+    public Response postMetrics(
+            TimelineMetrics metrics) {
+        metricsHolder.putMetricsForAggregationPublishing(metrics);
+        metricsHolder.putMetricsForRawPublishing(metrics);
+        return Response.ok().build();
+    }
+}
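
Sinks on the same host POST their TimelineMetrics JSON to this service instead of to the remote collector. A hedged client sketch against the default in-memory aggregation port; the payload field names follow the monitor's flatten() output shown further down, and the top-level "metrics" wrapper key is an assumption:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class LocalAggregatorPostSketch {
        public static void main(String[] args) throws Exception {
            // 61888 is the default timeline.metrics.host.inmemory.aggregation.port.
            URL url = new URL("http://localhost:61888/ws/v1/timeline/metrics");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");
            conn.setRequestProperty("Content-Type", "application/json");
            conn.setDoOutput(true);
            String json = "{\"metrics\":[{\"metricname\":\"cpu_user\",\"appid\":\"HOST\","
                    + "\"hostname\":\"node1.example.com\",\"instanceid\":\"\","
                    + "\"starttime\":1490000000000,\"metrics\":{\"1490000000000\":1.0}}]}";
            OutputStream os = conn.getOutputStream();
            os.write(json.getBytes("UTF-8"));
            os.close();
            System.out.println("HTTP " + conn.getResponseCode()); // 200 on success
        }
    }
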
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
new file mode 100644
index 0000000..f317ed9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+
+public class RawMetricsPublisher extends AbstractMetricPublisherThread {
+    private final Log LOG;
+
+    public RawMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForRawPublishing();
+    }
+
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricValues) {
+        // Merge everything into one TimelineMetrics object
+        TimelineMetrics timelineMetrics = new TimelineMetrics();
+        for (TimelineMetrics metrics : metricValues.values()) {
+            for (TimelineMetric timelineMetric : metrics.getMetrics())
+                timelineMetrics.addOrMergeTimelineMetric(timelineMetric);
+        }
+        // Serialize the merged TimelineMetrics to a JSON string
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(timelineMetrics);
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+        return json;
+    }
+}
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
new file mode 100644
index 0000000..b355c97
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Singleton holding two Guava caches that buffer raw and aggregated metrics.
+ */
+public class TimelineMetricsHolder {
+    private static final int DEFAULT_RAW_CACHE_EXPIRE_TIME = 60;
+    private static final int DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME = 300;
+    private Cache<Long, TimelineMetrics> aggregationMetricsCache;
+    private Cache<Long, TimelineMetrics> rawMetricsCache;
+    private static TimelineMetricsHolder instance = null;
+    //to ensure no metric values are expired
+    private static int EXPIRE_DELAY = 30;
+    ReadWriteLock aggregationCacheLock = new ReentrantReadWriteLock();
+    ReadWriteLock rawCacheLock = new ReentrantReadWriteLock();
+
+    private TimelineMetricsHolder(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        this.rawMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(rawCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+        this.aggregationMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(aggregationCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+    }
+
+    public static synchronized TimelineMetricsHolder getInstance(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        if (instance == null) {
+            instance = new TimelineMetricsHolder(rawCacheExpireTime, aggregationCacheExpireTime);
+        }
+        return instance;
+    }
+
+    /**
+     * Uses the default expiration times to initialize the caches if they are not initialized yet.
+     * @return
+     */
+    public static TimelineMetricsHolder getInstance() {
+        return getInstance(DEFAULT_RAW_CACHE_EXPIRE_TIME, DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME);
+    }
+
+    public void putMetricsForAggregationPublishing(TimelineMetrics timelineMetrics) {
+        aggregationCacheLock.writeLock().lock();
+        aggregationMetricsCache.put(System.currentTimeMillis(), timelineMetrics);
+        aggregationCacheLock.writeLock().unlock();
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForAggregationPublishing() {
+        return extractMetricsFromCacheWithLock(aggregationMetricsCache, aggregationCacheLock);
+    }
+
+    public void putMetricsForRawPublishing(TimelineMetrics metrics) {
+        rawCacheLock.writeLock().lock();
+        rawMetricsCache.put(System.currentTimeMillis(), metrics);
+        rawCacheLock.writeLock().unlock();
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForRawPublishing() {
+        return extractMetricsFromCacheWithLock(rawMetricsCache, rawCacheLock);
+    }
+
+    /**
+     * Returns the cache contents and then clears the cache.
+     * @param cache
+     * @param lock
+     * @return
+     */
+    private Map<Long, TimelineMetrics> extractMetricsFromCacheWithLock(Cache<Long, TimelineMetrics> cache, ReadWriteLock lock) {
+        lock.writeLock().lock();
+        Map<Long, TimelineMetrics> metricsMap = new TreeMap<>(cache.asMap());
+        cache.invalidateAll();
+        lock.writeLock().unlock();
+        return metricsMap;
+    }
+
+}
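
Each cache expires entries a little after its publisher's interval (EXPIRE_DELAY) so that a slow extraction never silently loses datapoints. A minimal sketch of that construction, assuming Guava on the classpath as in the pom above:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.util.concurrent.TimeUnit;

    public class ExpiringCacheSketch {
        public static void main(String[] args) {
            int publishIntervalSeconds = 60; // raw publishing interval
            int expireDelaySeconds = 30;     // safety margin, as EXPIRE_DELAY above
            Cache<Long, String> cache = CacheBuilder.newBuilder()
                    .expireAfterWrite(publishIntervalSeconds + expireDelaySeconds, TimeUnit.SECONDS)
                    .build();
            cache.put(System.currentTimeMillis(), "metrics batch");
            // Entries silently disappear ~90s after the write, read or not.
            System.out.println("cached entries: " + cache.size());
        }
    }
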
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
index 967e133..9bbb271 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
@@ -24,7 +24,7 @@
 PIDFILE=/var/run/ambari-metrics-monitor/ambari-metrics-monitor.pid
 OUTFILE=/var/log/ambari-metrics-monitor/ambari-metrics-monitor.out
 
-STOP_TIMEOUT=5
+STOP_TIMEOUT=10
 
 OK=0
 NOTOK=1
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
new file mode 100644
index 0000000..2249e53
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import threading
+import subprocess
+import logging
+import urllib2
+
+logger = logging.getLogger()
+class Aggregator(threading.Thread):
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self._aggregator_process = None
+    self._sleep_interval = config.get_collector_sleep_interval()
+    self.stopped = False
+
+  def run(self):
+    java_home = self._config.get_java_home()
+    collector_hosts = self._config.get_metrics_collector_hosts_as_string()
+    jvm_args = self._config.get_aggregator_jvm_args()
+    config_dir = self._config.get_config_dir()
+    class_name = "org.apache.hadoop.metrics2.host.aggregator.AggregatorApplication"
+    ams_log_file = "ambari-metrics-aggregator.log"
+    additional_classpath = ':{0}'.format(config_dir)
+    ams_log_dir = self._config.ams_monitor_log_dir()
+    logger.info('Starting Aggregator thread.')
+    cmd = "{0}/bin/java {1} -Dams.log.dir={2} -Dams.log.file={3} -cp /var/lib/ambari-metrics-monitor/lib/*{4} {5} {6}"\
+      .format(java_home, jvm_args, ams_log_dir, ams_log_file, additional_classpath, class_name, collector_hosts)
+
+    logger.info("Executing : {0}".format(cmd))
+
+    self._aggregator_process = subprocess.Popen(cmd, stdout=None, stderr=None, shell=True)
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self._sleep_interval):
+        break
+    pass
+    self.stop()
+
+  def stop(self):
+    self.stopped = True
+    if self._aggregator_process :
+      logger.info('Stopping Aggregator thread.')
+      self._aggregator_process.terminate()
+
+class AggregatorWatchdog(threading.Thread):
+  SLEEP_TIME = 30
+  CONNECTION_TIMEOUT = 5
+  AMS_AGGREGATOR_METRICS_CHECK_URL = "/ws/v1/timeline/metrics/"
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self.URL = 'http://localhost:' + self._config.get_inmemory_aggregation_port() + self.AMS_AGGREGATOR_METRICS_CHECK_URL
+    self._is_ok = threading.Event()
+    self.set_is_ok(True)
+    self.stopped = False
+
+  def run(self):
+    logger.info('Starting Aggregator Watchdog thread.')
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self.SLEEP_TIME):
+        break
+      try:
+        conn = urllib2.urlopen(self.URL, timeout=self.CONNECTION_TIMEOUT)
+        self.set_is_ok(True)
+      except (KeyboardInterrupt, SystemExit):
+        raise
+      except Exception, e:
+        self.set_is_ok(False)
+        continue
+      if conn.code != 200:
+        self.set_is_ok(False)
+        continue
+      conn.close()
+
+  def is_ok(self):
+    return self._is_ok.is_set()
+
+  def set_is_ok(self, value):
+    if value:
+      self._is_ok.set()
+    elif self.is_ok():
+      logger.warning("Watcher couldn't connect to aggregator.")
+      self._is_ok.clear()
+
+
+  def stop(self):
+    logger.info('Stopping watcher thread.')
+    self.stopped = True
+
+
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
index 0052808..34a6787 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/application_metric_map.py
@@ -66,7 +66,7 @@
     del self.app_metric_map[ app_id ]
   pass
 
-  def flatten(self, application_id = None, clear_once_flattened = False):
+  def flatten(self, application_id = None, clear_once_flattened = False, set_instanceid = False, instanceid = None):
     """
     Return flatten dict to caller in json format.
     Json format:
@@ -89,11 +89,14 @@
       for appId, metrics in local_metric_map.iteritems():
         for metricId, metricData in dict(metrics).iteritems():
           # Create a timeline metric object
+          result_instanceid = ""
+          if set_instanceid:
+            result_instanceid = instanceid
           timeline_metric = {
             "hostname" : self.hostname,
             "metricname" : metricId,
             "appid" : "HOST",
-            "instanceid" : "",
+            "instanceid" : result_instanceid,
             "starttime" : self.get_start_time(appId, metricId),
             "metrics" : metricData
           }
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 5686c50..d1429ed 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -30,6 +30,8 @@
 # Abstraction for OS-dependent configuration defaults
 #
 class ConfigDefaults(object):
+  def get_config_dir(self):
+    pass
   def get_config_file_path(self):
     pass
   def get_metric_file_path(self):
@@ -40,11 +42,14 @@
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ConfigDefaultsWindows(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "conf"
     self._CONFIG_FILE_PATH = "conf\\metric_monitor.ini"
     self._METRIC_FILE_PATH = "conf\\metric_groups.conf"
     self._METRIC_FILE_PATH = "conf\\ca.pem"
     pass
 
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -55,11 +60,13 @@
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ConfigDefaultsLinux(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "/etc/ambari-metrics-monitor/conf/"
     self._CONFIG_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_monitor.ini"
     self._METRIC_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_groups.conf"
     self._CA_CERTS_FILE_PATH = "/etc/ambari-metrics-monitor/conf/ca.pem"
     pass
-
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -71,6 +78,7 @@
 
 config = ConfigParser.RawConfigParser()
 
+CONFIG_DIR = configDefaults.get_config_dir()
 CONFIG_FILE_PATH = configDefaults.get_config_file_path()
 METRIC_FILE_PATH = configDefaults.get_metric_file_path()
 CA_CERTS_FILE_PATH = configDefaults.get_ca_certs_file_path()
@@ -191,6 +199,8 @@
         # No hostname script identified in the ambari agent conf
         pass
     pass
+  def get_config_dir(self):
+    return CONFIG_DIR
 
   def getConfig(self):
     return self.config
@@ -214,10 +224,14 @@
   def get_hostname_config(self):
     return self.get("default", "hostname", None)
 
-  def get_metrics_collector_hosts(self):
+  def get_metrics_collector_hosts_as_list(self):
     hosts = self.get("default", "metrics_servers", "localhost")
     return hosts.split(",")
 
+  def get_metrics_collector_hosts_as_string(self):
+    hosts = self.get("default", "metrics_servers", "localhost")
+    return hosts
+
   def get_failover_strategy(self):
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
@@ -239,9 +253,32 @@
   def is_server_https_enabled(self):
     return "true" == str(self.get("collector", "https_enabled")).lower()
 
+  def get_java_home(self):
+    return self.get("aggregation", "java_home")
+
+  def is_inmemory_aggregation_enabled(self):
+    return "true" == str(self.get("aggregation", "host_in_memory_aggregation")).lower()
+
+  def get_inmemory_aggregation_port(self):
+    return self.get("aggregation", "host_in_memory_aggregation_port")
+
+  def get_aggregator_jvm_args(self):
+    args = self.get("aggregation", "jvm_arguments", "-Xmx256m -Xms128m -XX:PermSize=68m")
+    return args
+
+  def ams_monitor_log_dir(self):
+    log_dir = self.get("aggregation", "ams_monitor_log_dir", "/var/log/ambari-metrics-monitor")
+    return log_dir
+
+  def is_set_instanceid(self):
+    return "true" == str(self.get("default", "set.instanceId", 'false')).lower()
+
   def get_server_host(self):
     return self.get("collector", "host")
 
+  def get_instanceid(self):
+    return self.get("default", "instanceid")
+
   def get_server_port(self):
     try:
       return int(self.get("collector", "port"))
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
index c0feed5..e5da9ba 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
@@ -27,6 +27,9 @@
 from metric_collector import MetricsCollector
 from emitter import Emitter
 from host_info import HostInfo
+from aggregator import Aggregator
+from aggregator import AggregatorWatchdog
+
 
 logger = logging.getLogger()
 
@@ -50,11 +53,15 @@
     self.initialize_events_cache()
     self.emitter = Emitter(self.config, self.application_metric_map, stop_handler)
     self._t = None
+    self.aggregator = None
+    self.aggregator_watchdog = None
 
   def run(self):
     logger.info('Running Controller thread: %s' % threading.currentThread().getName())
 
     self.start_emitter()
+    if self.config.is_inmemory_aggregation_enabled():
+      self.start_aggregator_with_watchdog()
 
     # Wake every 5 seconds to push events to the queue
     while True:
@@ -62,6 +69,10 @@
         logger.warn('Event Queue full!! Suspending further collections.')
       else:
         self.enqueque_events()
+      # restart aggregator if needed
+      if self.config.is_inmemory_aggregation_enabled() and not self.aggregator_watchdog.is_ok():
+        logger.warning("Aggregator is not available. Restarting aggregator.")
+        self.start_aggregator_with_watchdog()
       pass
       # Wait for the service stop event instead of sleeping blindly
       if 0 == self._stop_handler.wait(self.sleep_interval):
@@ -75,6 +86,12 @@
     # The emitter thread should have stopped by now, just ensure it has shut
     # down properly
     self.emitter.join(5)
+
+    if self.config.is_inmemory_aggregation_enabled():
+      self.aggregator.stop()
+      self.aggregator_watchdog.stop()
+      self.aggregator.join(5)
+      self.aggregator_watchdog.join(5)
     pass
 
   # TODO: Optimize to not use Timer class and use the Queue instead
@@ -115,3 +132,14 @@
 
   def start_emitter(self):
     self.emitter.start()
+
+  # Start the aggregator and watchdog threads
+  def start_aggregator_with_watchdog(self):
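+    # Stop any previously started aggregator and watchdog threads first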
+    if self.aggregator:
+      self.aggregator.stop()
+    if self.aggregator_watchdog:
+      self.aggregator_watchdog.stop()
+    self.aggregator = Aggregator(self.config, self._stop_handler)
+    self.aggregator_watchdog = AggregatorWatchdog(self.config, self._stop_handler)
+    self.aggregator.start()
+    self.aggregator_watchdog.start()
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
index ba3f18e..77b8c23 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -44,8 +44,16 @@
     self._stop_handler = stop_handler
     self.application_metric_map = application_metric_map
     self.collector_port = config.get_server_port()
-    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts()
+    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts_as_list()
     self.is_server_https_enabled = config.is_server_https_enabled()
+    self.set_instanceid = config.is_set_instanceid()
+    self.instanceid = config.get_instanceid()
+    self.is_inmemory_aggregation_enabled = config.is_inmemory_aggregation_enabled()
+
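+    # With in-memory aggregation enabled, emit to the local aggregator over plain HTTP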
+    if self.is_inmemory_aggregation_enabled:
+      self.collector_port = config.get_inmemory_aggregation_port()
+      self.all_metrics_collector_hosts = ['localhost']
+      self.is_server_https_enabled = False
 
     if self.is_server_https_enabled:
       self.ca_certs = config.get_ca_certs()
@@ -74,7 +82,7 @@
     # This call will acquire lock on the map and clear contents before returning
     # After configured number of retries the data will not be sent to the
     # collector
-    json_data = self.application_metric_map.flatten(None, True)
+    json_data = self.application_metric_map.flatten(None, True, set_instanceid=self.set_instanceid, instanceid=self.instanceid)
     if json_data is None:
       logger.info("Nothing to emit, resume waiting.")
       return
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
index bfb6957..7a9fbec 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
@@ -117,7 +117,8 @@
 
   def wait(self, timeout=None):
     # Stop process when stop event received
-    if self.stop_event.wait(timeout):
+    self.stop_event.wait(timeout)
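+    # Event.wait() returns None on Python < 2.7, so check the flag explicitly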
+    if self.stop_event.isSet():
       logger.info("Stop event received")
       return 0
     # Timeout
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
index d218015..53d27f8 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
@@ -21,7 +21,7 @@
 import logging
 import os
 import sys
-
+import signal
 from ambari_commons.os_utils import remove_file
 
 from core.controller import Controller
@@ -73,6 +73,10 @@
   if scmStatus is not None:
     scmStatus.reportStarted()
 
+  # Block the main thread in signal.pause() so that signals such as SIGTERM
+  # are delivered promptly (CPython only runs signal handlers on the main
+  # thread, and blocking in join() can delay them)
+  # TODO: revisit if a cleaner approach is found
+  signal.pause()
+
   #The controller thread finishes when the stop event is signaled
   controller.join()
 
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 9d492cb..6f5e9e0 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -70,6 +70,10 @@
   private static final String TIMELINE_METRICS_SSL_KEYSTORE_PATH_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SSL_KEYSTORE_PATH_PROPERTY;
   private static final String TIMELINE_METRICS_SSL_KEYSTORE_TYPE_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SSL_KEYSTORE_TYPE_PROPERTY;
   private static final String TIMELINE_METRICS_SSL_KEYSTORE_PASSWORD_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SSL_KEYSTORE_PASSWORD_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + INSTANCE_ID_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SET_INSTANCE_ID_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY;
   private static final String TIMELINE_DEFAULT_HOST = "localhost";
   private static final String TIMELINE_DEFAULT_PORT = "6188";
   private static final String TIMELINE_DEFAULT_PROTOCOL = "http";
@@ -87,11 +91,15 @@
   private TimelineMetricsCache metricsCache;
   private int timeoutSeconds = 10;
   private String zookeeperQuorum = null;
+  private boolean setInstanceId;
+  private String instanceId;
 
   private String[] excludedMetricsPrefixes;
   private String[] includedMetricsPrefixes;
   // Local cache to avoid prefix matching everytime
   private Set<String> excludedMetrics = new HashSet<>();
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -128,6 +136,17 @@
     return hostname;
   }
 
+
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setMetricsCache(TimelineMetricsCache metricsCache) {
     this.metricsCache = metricsCache;
   }
@@ -162,6 +181,11 @@
         collectorHosts = parseHostsStringIntoCollection(props.getString(TIMELINE_HOSTS_PROPERTY, TIMELINE_DEFAULT_HOST));
         metricCollectorProtocol = props.getString(TIMELINE_PROTOCOL_PROPERTY, TIMELINE_DEFAULT_PROTOCOL);
 
+        instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY, null);
+        setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY, false);
+
+        hostInMemoryAggregationEnabled = props.getBoolean(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+        hostInMemoryAggregationPort = props.getInt(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
         setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));
 
         if (metricCollectorProtocol.contains("https")) {
@@ -315,6 +339,9 @@
       TimelineMetric timelineMetric = new TimelineMetric();
       timelineMetric.setMetricName(attributeName);
       timelineMetric.setHostName(hostname);
+      if (setInstanceId) {
+        timelineMetric.setInstanceId(instanceId);
+      }
       timelineMetric.setAppId(component);
       timelineMetric.setStartTime(currentTimeMillis);
       timelineMetric.setType(ClassUtils.getShortCanonicalName(attributeValue, "Number"));
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
index e1ac48c..b05190c 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
@@ -84,6 +84,8 @@
     properties.setProperty("kafka.timeline.metrics.reporter.enabled", "true");
     properties.setProperty("external.kafka.metrics.exclude.prefix", "a.b.c");
     properties.setProperty("external.kafka.metrics.include.prefix", "a.b.c.d");
+    properties.setProperty("kafka.timeline.metrics.instanceId", "cluster");
+    properties.setProperty("kafka.timeline.metrics.set.instanceId", "false");
     props = new VerifiableProperties(properties);
   }
 
@@ -120,6 +122,8 @@
     properties.setProperty("kafka.timeline.metrics.truststore.path", "");
     properties.setProperty("kafka.timeline.metrics.truststore.type", "");
     properties.setProperty("kafka.timeline.metrics.truststore.password", "");
+    properties.setProperty("kafka.timeline.metrics.instanceId", "cluster");
+    properties.setProperty("kafka.timeline.metrics.set.instanceId", "false");
     kafkaTimelineMetricsReporter.init(new VerifiableProperties(properties));
     kafkaTimelineMetricsReporter.stopReporter();
     verifyAll();
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 9a55f10..d408e1a 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -50,9 +50,13 @@
   private Collection<String> collectorHosts;
   private String zkQuorum;
   private String protocol;
+  private boolean setInstanceId;
+  private String instanceId;
   private NimbusClient nimbusClient;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -94,6 +98,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map conf) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -126,6 +140,12 @@
           Integer.parseInt(cf.get(METRICS_POST_TIMEOUT_SECONDS).toString()) :
           DEFAULT_POST_TIMEOUT_SECONDS;
       applicationId = cf.get(APP_ID).toString();
+      if (cf.containsKey(SET_INSTANCE_ID_PROPERTY)) {
+        setInstanceId = Boolean.valueOf(cf.get(SET_INSTANCE_ID_PROPERTY).toString());
+        instanceId = cf.get(INSTANCE_ID_PROPERTY).toString();
+      }
+      if (cf.containsKey(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY)) {
+        hostInMemoryAggregationEnabled = Boolean.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY).toString());
+        hostInMemoryAggregationPort = Integer.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY).toString());
+      }
 
       collectorUri = constructTimelineMetricUri(protocol, findPreferredCollectHost(), port);
       if (protocol.contains("https")) {
@@ -196,6 +216,9 @@
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName(attributeName);
     timelineMetric.setHostName(hostname);
+    if (setInstanceId) {
+      timelineMetric.setInstanceId(instanceId);
+    }
     timelineMetric.setAppId(component);
     timelineMetric.setStartTime(currentTimeMillis);
     timelineMetric.getMetricValues().put(currentTimeMillis, Double.parseDouble(attributeValue));
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 60c1427..ff72f24 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -59,6 +59,10 @@
   private String port;
   private String topologyName;
   private String applicationId;
+  private boolean setInstanceId;
+  private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -96,6 +100,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -122,6 +136,10 @@
     protocol = configuration.getProperty(COLLECTOR_PROTOCOL, "http");
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
 
+    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, null);
+    setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
+
     // Initialize the collector write strategy
     super.init();
 
@@ -243,6 +261,9 @@
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName(attributeName);
     timelineMetric.setHostName(hostName);
+    if (setInstanceId) {
+      timelineMetric.setInstanceId(instanceId);
+    }
     timelineMetric.setAppId(applicationId);
     timelineMetric.setStartTime(currentTimeMillis);
     timelineMetric.setType(ClassUtils.getShortCanonicalName(
diff --git a/ambari-metrics/ambari-metrics-storm-sink/pom.xml b/ambari-metrics/ambari-metrics-storm-sink/pom.xml
index 612ad63..9e11539 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-storm-sink/pom.xml
@@ -31,7 +31,7 @@
   <packaging>jar</packaging>
 
   <properties>
-    <storm.version>1.1.0-SNAPSHOT</storm.version>
+    <storm.version>1.1.0</storm.version>
   </properties>
 
   <build>
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 535fae0..5b75065 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -46,8 +46,12 @@
   private Collection<String> collectorHosts;
   private String zkQuorum;
   private String protocol;
+  private boolean setInstanceId;
+  private String instanceId;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -89,6 +93,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Object registrationArgument) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -115,6 +129,11 @@
           Integer.parseInt(configuration.getProperty(METRICS_POST_TIMEOUT_SECONDS)) :
           DEFAULT_POST_TIMEOUT_SECONDS;
       applicationId = configuration.getProperty(CLUSTER_REPORTER_APP_ID, DEFAULT_CLUSTER_REPORTER_APP_ID);
+      setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY));
+      instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
+
+      hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+      hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
 
       if (protocol.contains("https")) {
         String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
@@ -226,6 +245,9 @@
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName(attributeName);
     timelineMetric.setHostName(hostname);
+    if (setInstanceId) {
+      timelineMetric.setInstanceId(instanceId);
+    }
     timelineMetric.setAppId(component);
     timelineMetric.setStartTime(currentTimeMillis);
     timelineMetric.setType(ClassUtils.getShortCanonicalName(attributeValue, "Number"));
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index f58f549..4d5a229 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -68,6 +68,10 @@
   private String port;
   private String topologyName;
   private String applicationId;
+  private String instanceId;
+  private boolean setInstanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -105,6 +109,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -133,6 +147,11 @@
 
     protocol = configuration.getProperty(COLLECTOR_PROTOCOL, "http");
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
+    instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, null);
+    setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
 
     // Initialize the collector write strategy
     super.init();
@@ -332,6 +351,9 @@
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName(attributeName);
     timelineMetric.setHostName(hostName);
+    if (setInstanceId) {
+      timelineMetric.setInstanceId(instanceId);
+    }
     timelineMetric.setAppId(applicationId);
     timelineMetric.setStartTime(currentTimeMillis);
     timelineMetric.setType(ClassUtils.getShortCanonicalName(
diff --git a/ambari-metrics/ambari-metrics-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
index d848eab..f9d7e19 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/pom.xml
+++ b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
@@ -477,7 +477,7 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
-      <version>4.2.5</version>
+      <version>4.5.2</version>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 17c58f0..f984253 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -19,14 +19,18 @@
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
+import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.metrics2.sink.timeline.TopNConfig;
 import org.apache.hadoop.service.AbstractService;
@@ -40,6 +44,7 @@
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.ConditionBuilder;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.TopNCondition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.SeriesAggregateFunction;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.TimelineMetricsSeriesAggregateFunction;
@@ -51,6 +56,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -60,6 +66,7 @@
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;
@@ -150,10 +157,14 @@
       scheduleAggregatorThread(dailyClusterAggregator);
 
       // Start the minute host aggregator
-      TimelineMetricAggregator minuteHostAggregator =
-        TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
-          hBaseAccessor, metricsConf, haController);
-      scheduleAggregatorThread(minuteHostAggregator);
+      if (Boolean.parseBoolean(metricsConf.get(TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION, "true"))) {
+        LOG.info("timeline.metrics.host.inmemory.aggregation is set to True, disabling host minute aggregation on collector");
+      } else {
+        TimelineMetricAggregator minuteHostAggregator =
+          TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
+            hBaseAccessor, metricsConf, haController);
+        scheduleAggregatorThread(minuteHostAggregator);
+      }
 
       // Start the hourly host aggregator
       TimelineMetricAggregator hourlyHostAggregator =
@@ -388,6 +399,65 @@
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
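+    // Flatten the aggregation result into the map form expected by saveHostAggregateRecords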
+    Map<TimelineMetric, MetricHostAggregate> aggregateMap = new HashMap<>();
+    for (TimelineMetricWithAggregatedValues entry : aggregationResult.getResult()) {
+      aggregateMap.put(entry.getTimelineMetric(), entry.getMetricAggregate());
+    }
+    hBaseAccessor.saveHostAggregateRecords(aggregateMap, PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME);
+
+    return new TimelinePutResponse();
+  }
+
+  @Override
+  public Map<String, Map<String,Set<String>>> getInstanceHostsMetadata(String instanceId, String appId)
+          throws SQLException, IOException {
+
+    Map<String, Set<String>> hostedApps = metricMetadataManager.getHostedAppsCache();
+    Map<String, Set<String>> instanceHosts = metricMetadataManager.getHostedInstanceCache();
+    Map<String, Map<String, Set<String>>> instanceAppHosts = new HashMap<>();
+
+    if (MapUtils.isEmpty(instanceHosts)) {
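+      // No instance metadata yet: expose all hosted apps under a single unnamed instance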
+      Map<String, Set<String>> appHostMap = new HashMap<String, Set<String>>();
+      for (String host : hostedApps.keySet()) {
+        for (String app : hostedApps.get(host)) {
+          if (!appHostMap.containsKey(app)) {
+            appHostMap.put(app, new HashSet<String>());
+          }
+          appHostMap.get(app).add(host);
+        }
+      }
+      instanceAppHosts.put("", appHostMap);
+    } else {
+      for (String instance : instanceHosts.keySet()) {
+
+        if (StringUtils.isNotEmpty(instanceId) && !instance.equals(instanceId)) {
+          continue;
+        }
+        Map<String, Set<String>> appHostMap = new HashMap<String, Set<String>>();
+        instanceAppHosts.put(instance, appHostMap);
+
+        Set<String> hostsWithInstance = instanceHosts.get(instance);
+        for (String host : hostsWithInstance) {
+          for (String app : hostedApps.get(host)) {
+            if (StringUtils.isNotEmpty(appId) && !app.equals(appId)) {
+              continue;
+            }
+
+            if (!appHostMap.containsKey(app)) {
+              appHostMap.put(app, new HashSet<String>());
+            }
+            appHostMap.get(app).add(host);
+          }
+        }
+      }
+    }
+
+    return instanceAppHosts;
+  }
+
+  @Override
   public List<String> getLiveInstances() {
 
     List<String> instances = null;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 8b0d84b..3b2a119 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -40,8 +42,6 @@
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricReadHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
@@ -115,6 +115,7 @@
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CONTAINER_METRICS_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_CONTAINER_METRICS_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_HOSTED_APPS_METADATA_TABLE_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_AGGREGATE_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.CREATE_METRICS_CLUSTER_AGGREGATE_TABLE_SQL;
@@ -124,6 +125,7 @@
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_ENCODING;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.DEFAULT_TABLE_COMPRESSION;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_HOSTED_APPS_METADATA_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_INSTANCE_HOST_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.GET_METRIC_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_DAILY_TABLE_NAME;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_HOURLY_TABLE_NAME;
@@ -138,6 +140,7 @@
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CLUSTER_AGGREGATE_TIME_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_HOSTED_APPS_METADATA_SQL;
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_INSTANCE_HOST_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METADATA_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_CONTAINER_METRICS_SQL;
@@ -430,6 +433,11 @@
         encoding, compression);
       stmt.executeUpdate(hostedAppSql);
 
+      //Host Instances table
+      String hostedInstancesSql = String.format(CREATE_INSTANCE_HOST_TABLE_SQL,
+        encoding, compression);
+      stmt.executeUpdate(hostedInstancesSql);
+
       // Container Metrics
       stmt.executeUpdate( String.format(CREATE_CONTAINER_METRICS_TABLE_SQL,
         encoding, tableTTL.get(CONTAINER_METRICS_TABLE_NAME), compression));
@@ -778,6 +786,10 @@
 
         metadataManager.putIfModifiedHostedAppsMetadata(
                 tm.getHostName(), tm.getAppId());
+
+        if (!tm.getAppId().equals("FLUME_HANDLER")) {
+          metadataManager.putIfModifiedHostedInstanceMetadata(tm.getInstanceId(), tm.getHostName());
+        }
       }
       if (!acceptMetric) {
         iterator.remove();
@@ -1552,6 +1564,55 @@
     }
   }
 
+  public void saveInstanceHostsMetadata(Map<String, Set<String>> instanceHostsMap) throws SQLException {
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    try {
+      stmt = conn.prepareStatement(UPSERT_INSTANCE_HOST_METADATA_SQL);
+      int rowCount = 0;
+
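+      // Upsert each (instanceId, host) pair; commit once after the loop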
+      for (Map.Entry<String, Set<String>> hostInstancesEntry : instanceHostsMap.entrySet()) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Host Instances Entry: " + hostInstancesEntry);
+        }
+
+        String instanceId = hostInstancesEntry.getKey();
+
+        for(String hostname : hostInstancesEntry.getValue()) {
+          stmt.clearParameters();
+          stmt.setString(1, instanceId);
+          stmt.setString(2, hostname);
+          try {
+            stmt.executeUpdate();
+            rowCount++;
+          } catch (SQLException sql) {
+            LOG.error("Error saving host instances metadata.", sql);
+          }
+        }
+
+      }
+
+      conn.commit();
+      LOG.info("Saved " + rowCount + " host instances metadata records.");
+
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+  }
+
   /**
    * Save metadata on updates.
    * @param metricMetadata Collection of TimelineMetricMetadata
@@ -1658,6 +1719,53 @@
     return hostedAppMap;
   }
 
+  public Map<String, Set<String>> getInstanceHostsMetdata() throws SQLException {
+    Map<String, Set<String>> instanceHostsMap = new HashMap<>();
+    Connection conn = getConnection();
+    PreparedStatement stmt = null;
+    ResultSet rs = null;
+
+    try {
+      stmt = conn.prepareStatement(GET_INSTANCE_HOST_METADATA_SQL);
+      rs = stmt.executeQuery();
+
+      while (rs.next()) {
+        String instanceId = rs.getString("INSTANCE_ID");
+        String hostname = rs.getString("HOSTNAME");
+
+        if (!instanceHostsMap.containsKey(instanceId)) {
+          instanceHostsMap.put(instanceId, new HashSet<String>());
+        }
+        instanceHostsMap.get(instanceId).add(hostname);
+      }
+
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException sql) {
+          // Ignore
+        }
+      }
+    }
+
+    return instanceHostsMap;
+  }
+
   // No filter criteria support for now.
   public Map<TimelineMetricMetadataKey, TimelineMetricMetadata> getTimelineMetricMetadata() throws SQLException {
     Map<TimelineMetricMetadataKey, TimelineMetricMetadata> metadataMap = new HashMap<>();
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 0d5042f..023465b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -296,6 +296,8 @@
 
   public static final String AMSHBASE_METRICS_WHITESLIST_FILE = "amshbase_metrics_whitelist";
 
+  public static final String TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION = "timeline.metrics.host.inmemory.aggregation";
+
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private Configuration amsEnvConf;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
index d049e33..d052d54 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -80,6 +81,7 @@
    */
   Map<String, List<TimelineMetricMetadata>> getTimelineMetricMetadata(String query) throws SQLException, IOException;
 
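+  /**
+   * Store metrics that were pre-aggregated on a host (e.g. by the in-memory
+   * host aggregator).
+   * @throws SQLException
+   * @throws IOException
+   */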
+  TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException;
+
   /**
    * Returns all hosts that have written metrics with the apps on the host
    * @return { hostname : [ appIds ] }
@@ -89,6 +91,14 @@
   Map<String, Set<String>> getHostAppsMetadata() throws SQLException, IOException;
 
   /**
+   * Returns all instances and, for each instance, the hosts grouped by the apps they run
+   * @return { instanceId : { appId : [ hosts ] } }
+   * @throws SQLException
+   * @throws IOException
+   */
+  Map<String, Map<String,Set<String>>> getInstanceHostsMetadata(String instanceId, String appId) throws SQLException, IOException;
+
+  /**
    * Return a list of known live collector nodes
    * @return [ hostname ]
    */
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
index 65d54c0..7b03b30 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
@@ -19,10 +19,10 @@
 
 import java.util.Map;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 /**
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
deleted file mode 100644
index 825ac25..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.codehaus.jackson.annotate.JsonSubTypes;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-/**
-*
-*/
-@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
-  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MetricAggregate {
-  private static final ObjectMapper mapper = new ObjectMapper();
-
-  protected Double sum = 0.0;
-  protected Double deviation;
-  protected Double max = Double.MIN_VALUE;
-  protected Double min = Double.MAX_VALUE;
-
-  public MetricAggregate() {
-  }
-
-  MetricAggregate(Double sum, Double deviation, Double max,
-                  Double min) {
-    this.sum = sum;
-    this.deviation = deviation;
-    this.max = max;
-    this.min = min;
-  }
-
-  public void updateSum(Double sum) {
-    this.sum += sum;
-  }
-
-  public void updateMax(Double max) {
-    if (max > this.max) {
-      this.max = max;
-    }
-  }
-
-  public void updateMin(Double min) {
-    if (min < this.min) {
-      this.min = min;
-    }
-  }
-
-  @JsonProperty("sum")
-  public Double getSum() {
-    return sum;
-  }
-
-  @JsonProperty("deviation")
-  public Double getDeviation() {
-    return deviation;
-  }
-
-  @JsonProperty("max")
-  public Double getMax() {
-    return max;
-  }
-
-  @JsonProperty("min")
-  public Double getMin() {
-    return min;
-  }
-
-  public void setSum(Double sum) {
-    this.sum = sum;
-  }
-
-  public void setDeviation(Double deviation) {
-    this.deviation = deviation;
-  }
-
-  public void setMax(Double max) {
-    this.max = max;
-  }
-
-  public void setMin(Double min) {
-    this.min = min;
-  }
-
-  public String toJSON() throws IOException {
-    return mapper.writeValueAsString(this);
-  }
-}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
deleted file mode 100644
index 9c837b6..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
-*
-*/
-public class MetricClusterAggregate extends MetricAggregate {
-  private int numberOfHosts;
-
-  @JsonCreator
-  public MetricClusterAggregate() {
-  }
-
-  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
-                         Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  @JsonProperty("numberOfHosts")
-  public int getNumberOfHosts() {
-    return numberOfHosts;
-  }
-
-  public void updateNumberOfHosts(int count) {
-    this.numberOfHosts += count;
-  }
-
-  public void setNumberOfHosts(int numberOfHosts) {
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricClusterAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricAggregate{" +
-      "sum=" + sum +
-      ", numberOfHosts=" + numberOfHosts +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
deleted file mode 100644
index 340ec75..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Represents a collection of minute based aggregation of values for
- * resolution greater than a minute.
- */
-public class MetricHostAggregate extends MetricAggregate {
-
-  private long numberOfSamples = 0;
-
-  @JsonCreator
-  public MetricHostAggregate() {
-    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
-  }
-
-  public MetricHostAggregate(Double sum, int numberOfSamples,
-                             Double deviation,
-                             Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  @JsonProperty("numberOfSamples")
-  public long getNumberOfSamples() {
-    return numberOfSamples == 0 ? 1 : numberOfSamples;
-  }
-
-  public void updateNumberOfSamples(long count) {
-    this.numberOfSamples += count;
-  }
-
-  public void setNumberOfSamples(long numberOfSamples) {
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  public double getAvg() {
-    return sum / numberOfSamples;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricHostAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricHostAggregate{" +
-      "sum=" + sum +
-      ", numberOfSamples=" + numberOfSamples +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
index 44aca03..9eaf456 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
@@ -21,6 +21,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricsFilter;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
index 02677b9..ba16b43 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;
@@ -91,6 +93,7 @@
     MetricHostAggregate hostAggregate = null;
     Map<TimelineClusterMetric, MetricHostAggregate> hostAggregateMap =
       new HashMap<TimelineClusterMetric, MetricHostAggregate>();
+    int perMetricCount = 0;
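+    // Rows merged into the current metric's aggregate; used to normalize sum and sample count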
 
     while (rs.next()) {
       TimelineClusterMetric currentMetric = readHelper.fromResultSet(rs);
@@ -106,14 +109,20 @@
         currentMetric.setTimestamp(endTime);
         hostAggregate = new MetricHostAggregate();
         hostAggregateMap.put(currentMetric, hostAggregate);
+        perMetricCount++;
       }
 
       if (existingMetric.equalsExceptTime(currentMetric)) {
         // Recalculate totals with current metric
         updateAggregatesFromHost(hostAggregate, currentHostAggregate);
-
+        perMetricCount++;
       } else {
-        // Switched over to a new metric - save existing
+        // Switched over to a new metric - normalize and save the previous aggregate
+
+        hostAggregate.setSum(hostAggregate.getSum() / perMetricCount);
+        hostAggregate.setNumberOfSamples(Math.round((float)hostAggregate.getNumberOfSamples() / (float)perMetricCount));
+        perMetricCount = 1;
+
         hostAggregate = new MetricHostAggregate();
         currentMetric.setTimestamp(endTime);
         updateAggregatesFromHost(hostAggregate, currentHostAggregate);
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index 5310906..34b1f9b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -38,6 +38,7 @@
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.PostProcessingUtil;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
@@ -129,6 +130,7 @@
     condition.addOrderByColumn("METRIC_NAME");
     condition.addOrderByColumn("HOSTNAME");
     condition.addOrderByColumn("APP_ID");
+    condition.addOrderByColumn("INSTANCE_ID");
     condition.addOrderByColumn("SERVER_TIME");
     return condition;
   }
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
index 0ea9c08..a17433b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
@@ -20,6 +20,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
index b5f49fb..672f85f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
index 7eb2457..f904ebe 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataManager.java
@@ -54,8 +54,10 @@
   private final Map<TimelineMetricMetadataKey, TimelineMetricMetadata> METADATA_CACHE = new ConcurrentHashMap<>();
   // Map to lookup apps on a host
   private final Map<String, Set<String>> HOSTED_APPS_MAP = new ConcurrentHashMap<>();
+  private final Map<String, Set<String>> INSTANCE_HOST_MAP = new ConcurrentHashMap<>();
   // Sync only when needed
   AtomicBoolean SYNC_HOSTED_APPS_METADATA = new AtomicBoolean(false);
+  AtomicBoolean SYNC_HOSTED_INSTANCES_METADATA = new AtomicBoolean(false);
 
   // Single thread to sync back new writes to the store
   private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
@@ -122,14 +124,25 @@
     return HOSTED_APPS_MAP;
   }
 
+  public Map<String, Set<String>> getHostedInstanceCache() {
+    return INSTANCE_HOST_MAP;
+  }
+
   public boolean syncHostedAppsMetadata() {
     return SYNC_HOSTED_APPS_METADATA.get();
   }
 
+  public boolean syncHostedInstanceMetadata() {
+    return SYNC_HOSTED_INSTANCES_METADATA.get();
+  }
+
   public void markSuccessOnSyncHostedAppsMetadata() {
     SYNC_HOSTED_APPS_METADATA.set(false);
   }
 
+  public void markSuccessOnSyncHostedInstanceMetadata() {
+    SYNC_HOSTED_INSTANCES_METADATA.set(false);
+  }
+
   /**
    * Test metric name for valid patterns and return true/false
    */
@@ -189,6 +202,23 @@
     }
   }
 
+  public void putIfModifiedHostedInstanceMetadata(String instanceId, String hostname) {
+    if (StringUtils.isEmpty(instanceId)) {
+      return;
+    }
+
+    Set<String> hosts = INSTANCE_HOST_MAP.get(instanceId);
+    if (hosts == null) {
+      hosts = new HashSet<>();
+      INSTANCE_HOST_MAP.put(instanceId, hosts);
+    }
+
+    if (!hosts.contains(hostname)) {
+      hosts.add(hostname);
+      SYNC_HOSTED_INSTANCES_METADATA.set(true);
+    }
+  }
+
   public void persistMetadata(Collection<TimelineMetricMetadata> metadata) throws SQLException {
     hBaseAccessor.saveMetricMetadata(metadata);
   }
@@ -197,6 +227,10 @@
     hBaseAccessor.saveHostAppsMetadata(hostedApps);
   }
 
+  public void persistHostedInstanceMetadata(Map<String, Set<String>> hostedInstancesMetadata) throws SQLException {
+    hBaseAccessor.saveInstanceHostsMetadata(hostedInstancesMetadata);
+  }
+
   public TimelineMetricMetadata getTimelineMetricMetadata(TimelineMetric timelineMetric, boolean isWhitelisted) {
     return new TimelineMetricMetadata(
       timelineMetric.getMetricName(),
@@ -233,6 +267,10 @@
     return hBaseAccessor.getHostedAppsMetadata();
   }
 
+  Map<String, Set<String>> getHostedInstancesFromStore() throws SQLException {
+    return hBaseAccessor.getInstanceHostsMetdata();
+  }
+
   private boolean supportAggregates(TimelineMetric metric) {
     return MapUtils.isEmpty(metric.getMetadata()) ||
       !(String.valueOf(true).equals(metric.getMetadata().get("skipAggregation")));
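
putIfModifiedHostedInstanceMetadata above is a check-then-act sequence (get, put, contains, add) over the ConcurrentHashMap, which is only race-free if a single thread populates the cache, as the hosted-apps path appears to assume. A hedged alternative sketch using computeIfAbsent, in case concurrent writers are possible (an assumption, not a reported bug):

    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class InstanceHostCacheSketch {
      private final Map<String, Set<String>> instanceHostMap = new ConcurrentHashMap<>();
      private final AtomicBoolean syncNeeded = new AtomicBoolean(false);

      public void putIfModified(String instanceId, String hostname) {
        if (instanceId == null || instanceId.isEmpty()) {
          return;
        }
        // computeIfAbsent is atomic per key; newKeySet() gives a concurrent set.
        Set<String> hosts = instanceHostMap.computeIfAbsent(
            instanceId, k -> ConcurrentHashMap.newKeySet());
        // Set.add reports whether the element was new, replacing contains+add.
        if (hosts.add(hostname)) {
          syncNeeded.set(true);
        }
      }
    }
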
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
index 25b525a..6d519f6 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
@@ -45,11 +45,15 @@
     persistMetricMetadata();
     LOG.debug("Persisting hosted apps metadata...");
     persistHostAppsMetadata();
+    LOG.debug("Persisting hosted instance metadata...");
+    persistHostInstancesMetadata();
     if (cacheManager.isDistributedModeEnabled()) {
       LOG.debug("Refreshing metric metadata...");
       refreshMetricMetadata();
       LOG.debug("Refreshing hosted apps metadata...");
       refreshHostAppsMetadata();
+      LOG.debug("Refreshing hosted instances metadata...");
+      refreshHostedInstancesMetadata();
     }
   }
 
@@ -147,6 +151,41 @@
   }
 
   /**
+   * Sync hosted instance metadata to the store if needed
+   */
+  private void persistHostInstancesMetadata() {
+    if (cacheManager.syncHostedInstanceMetadata()) {
+      Map<String, Set<String>> persistedData = null;
+      try {
+        persistedData = cacheManager.getHostedInstancesFromStore();
+      } catch (SQLException e) {
+        LOG.warn("Failed on fetching hosted instances data from store.", e);
+        return; // Something wrong with store
+      }
+
+      Map<String, Set<String>> cachedData = cacheManager.getHostedInstanceCache();
+      Map<String, Set<String>> dataToSync = new HashMap<>();
+      if (cachedData != null && !cachedData.isEmpty()) {
+        for (Map.Entry<String, Set<String>> cacheEntry : cachedData.entrySet()) {
+          // No persistence / stale data in store
+          if (persistedData == null || persistedData.isEmpty() ||
+            !persistedData.containsKey(cacheEntry.getKey()) ||
+            !persistedData.get(cacheEntry.getKey()).containsAll(cacheEntry.getValue())) {
+            dataToSync.put(cacheEntry.getKey(), cacheEntry.getValue());
+          }
+        }
+        try {
+          cacheManager.persistHostedInstanceMetadata(dataToSync);
+          cacheManager.markSuccessOnSyncHostedInstanceMetadata();
+
+        } catch (SQLException e) {
+          LOG.warn("Error persisting hosted apps metadata.", e);
+        }
+      }
+
+    }
+  }
+
+  /**
    * Read all hosted apps metadata and update cached values - HA
    */
   private void refreshHostAppsMetadata() {
@@ -166,4 +205,22 @@
       }
     }
   }
+
+  private void refreshHostedInstancesMetadata() {
+    Map<String, Set<String>> hostedInstancesFromStore = null;
+    try {
+      hostedInstancesFromStore = cacheManager.getHostedInstancesFromStore();
+    } catch (SQLException e) {
+      LOG.warn("Error refreshing metadata from store.", e);
+    }
+    if (hostedInstancesFromStore != null) {
+      Map<String, Set<String>> cachedData = cacheManager.getHostedInstanceCache();
+
+      for (Map.Entry<String, Set<String>> storeEntry : hostedInstancesFromStore.entrySet()) {
+        if (!cachedData.containsKey(storeEntry.getKey())) {
+          cachedData.put(storeEntry.getKey(), storeEntry.getValue());
+        }
+      }
+    }
+  }
 }
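
persistHostInstancesMetadata writes only the entries whose cached host set is not already covered by the store. A standalone sketch of that delta computation with hypothetical instance and host names, to make the containsAll() condition concrete:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class InstanceSyncDeltaSketch {
      public static void main(String[] args) {
        Map<String, Set<String>> persisted = new HashMap<>();
        persisted.put("i1", new HashSet<>(Arrays.asList("h1")));

        Map<String, Set<String>> cached = new HashMap<>();
        cached.put("i1", new HashSet<>(Arrays.asList("h1", "h2"))); // h2 is new
        cached.put("i2", new HashSet<>(Arrays.asList("h1")));       // i2 is new

        Map<String, Set<String>> dataToSync = new HashMap<>();
        for (Map.Entry<String, Set<String>> e : cached.entrySet()) {
          Set<String> stored = persisted.get(e.getKey());
          // Same condition as the diff: missing key or stale host set in store.
          if (stored == null || !stored.containsAll(e.getValue())) {
            dataToSync.put(e.getKey(), e.getValue());
          }
        }
        System.out.println(dataToSync); // e.g. {i1=[h1, h2], i2=[h1]}
      }
    }
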
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
index 0c8e5a7..d39230d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/PhoenixTransactSQL.java
@@ -148,6 +148,12 @@
       "CONSTRAINT pk PRIMARY KEY (HOSTNAME))" +
       "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
 
+  public static final String CREATE_INSTANCE_HOST_TABLE_SQL =
+    "CREATE TABLE IF NOT EXISTS INSTANCE_HOST_METADATA " +
+      "(INSTANCE_ID VARCHAR, HOSTNAME VARCHAR, " +
+      "CONSTRAINT pk PRIMARY KEY (INSTANCE_ID, HOSTNAME))" +
+      "DATA_BLOCK_ENCODING='%s', COMPRESSION='%s'";
+
   public static final String ALTER_METRICS_METADATA_TABLE =
     "ALTER TABLE METRICS_METADATA ADD IF NOT EXISTS IS_WHITELISTED BOOLEAN";
 
@@ -230,6 +236,9 @@
   public static final String UPSERT_HOSTED_APPS_METADATA_SQL =
     "UPSERT INTO HOSTED_APPS_METADATA (HOSTNAME, APP_IDS) VALUES (?, ?)";
 
+  public static final String UPSERT_INSTANCE_HOST_METADATA_SQL =
+    "UPSERT INTO INSTANCE_HOST_METADATA (INSTANCE_ID, HOSTNAME) VALUES (?, ?)";
+
   /**
    * Retrieve a set of rows from metrics records table.
    */
@@ -309,6 +318,9 @@
   public static final String GET_HOSTED_APPS_METADATA_SQL = "SELECT " +
     "HOSTNAME, APP_IDS FROM HOSTED_APPS_METADATA";
 
+  public static final String GET_INSTANCE_HOST_METADATA_SQL = "SELECT " +
+    "INSTANCE_ID, HOSTNAME FROM INSTANCE_HOST_METADATA";
+
   /**
    * Aggregate host metrics using a GROUP BY clause to take advantage of
    * N - way parallel scan where N = number of regions.
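
CREATE_INSTANCE_HOST_TABLE_SQL carries two %s placeholders, matching the pattern of the existing DDL constants in this file. A hedged sketch of how such a template is typically expanded before execution; the encoding and compression values here are illustrative, not the defaults Ambari actually configures:

    // Sketch: expanding the DDL template before execution. "FAST_DIFF" and
    // "SNAPPY" are hypothetical values; Ambari derives the real ones from
    // its configuration.
    String ddl = String.format(PhoenixTransactSQL.CREATE_INSTANCE_HOST_TABLE_SQL,
        "FAST_DIFF", "SNAPPY");
    // stmt.executeUpdate(ddl);  // executed through a plain JDBC Statement
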
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
index 304a8e0..50cfb08 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.PrecisionLimitExceededException;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
@@ -285,6 +286,36 @@
     }
   }
 
+  /**
+   * Store the given pre-aggregated host metrics into the timeline store,
+   * and return errors that happened during storing.
+   */
+  @Path("/metrics/aggregated")
+  @POST
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelinePutResponse postAggregatedMetrics(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    AggregationResult metrics) {
+
+    init(res);
+    if (metrics == null) {
+      return new TimelinePutResponse();
+    }
+
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing aggregated metrics: " +
+                TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
+      }
+
+      return timelineMetricStore.putHostAggregatedMetrics(metrics);
+    } catch (Exception e) {
+      LOG.error("Error saving metrics.", e);
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
   @Path("/containermetrics")
   @POST
   @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
@@ -412,6 +443,24 @@
     }
   }
 
+  @GET
+  @Path("/metrics/instances")
+  @Produces({ MediaType.APPLICATION_JSON })
+  public Map<String, Map<String, Set<String>>> getClusterHostsMetadata(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    @QueryParam("appId") String appId,
+    @QueryParam("instanceId") String instanceId
+  ) {
+    init(res);
+
+    try {
+      return timelineMetricStore.getInstanceHostsMetadata(instanceId, appId);
+    } catch (Exception e) {
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
   /**
    * This is a discovery endpoint that advertises known live collector
    * instances. Note: It will always answer with current instance as live.
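
Two endpoints are added in this file: a POST that accepts a pre-aggregated AggregationResult payload and a GET that exposes the new instance-to-hosts metadata. A hedged client sketch for the POST; the host, port, the /ws/v1/timeline prefix, and the JSON field names are assumptions about a typical collector deployment, and only the /metrics/aggregated path segment comes from the annotations above:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class AggregatedMetricsPostSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:6188/ws/v1/timeline/metrics/aggregated");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);

        // Minimal hypothetical AggregationResult payload; the field names
        // are an assumption about its JSON shape.
        String body = "{\"result\":[],\"timeInMilis\":0}";
        try (OutputStream os = conn.getOutputStream()) {
          os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }
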
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index 0087fd9..d5baaef 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -26,12 +26,12 @@
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
index 37ec134..7eeb9c4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Arrays;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
index a910cc2..d668178 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
@@ -22,11 +22,11 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
index 44f48e8..3009163 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
   .timeline;
 
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.junit.Test;
 
 import static org.assertj.core.api.Assertions.assertThat;
@@ -34,7 +34,7 @@
     assertThat(aggregate.getSum()).isEqualTo(3.0);
     assertThat(aggregate.getMin()).isEqualTo(1.0);
     assertThat(aggregate.getMax()).isEqualTo(2.0);
-    assertThat(aggregate.getAvg()).isEqualTo(3.0 / 2);
+    assertThat(aggregate.calculateAverage()).isEqualTo(3.0 / 2);
   }
 
   @Test
@@ -50,7 +50,7 @@
     assertThat(aggregate.getSum()).isEqualTo(12.0);
     assertThat(aggregate.getMin()).isEqualTo(0.5);
     assertThat(aggregate.getMax()).isEqualTo(7.5);
-    assertThat(aggregate.getAvg()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
+    assertThat(aggregate.calculateAverage()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
   }
 
   static MetricHostAggregate createAggregate (Double sum, Double min,
@@ -63,4 +63,4 @@
     aggregate.setNumberOfSamples(samplesCount);
     return aggregate;
   }
-}
\ No newline at end of file
+}
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index b2e8cac..ac2f9d7 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -92,11 +93,21 @@
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
+    return null;
+  }
+
+  @Override
   public Map<String, Set<String>> getHostAppsMetadata() throws SQLException, IOException {
     return Collections.emptyMap();
   }
 
   @Override
+  public Map<String, Map<String,Set<String>>> getInstanceHostsMetadata(String instanceId, String appId) throws SQLException, IOException {
+    return Collections.emptyMap();
+  }
+
+  @Override
   public List<String> getLiveInstances() {
     return Collections.emptyList();
   }
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
index fa0cfe9..53f6f6c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Collections;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
index 590f82a..07fd85d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
@@ -20,13 +20,13 @@
 
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;
@@ -659,14 +659,14 @@
     while (rs.next()) {
       if ("disk_used".equals(rs.getString("METRIC_NAME"))) {
         assertEquals("APP_ID", "test_app", rs.getString("APP_ID"));
-        assertEquals("METRIC_SUM", 16.0, rs.getDouble("METRIC_SUM"));
-        assertEquals("METRIC_COUNT", 8, rs.getLong("METRIC_COUNT"));
+        assertEquals("METRIC_SUM", 4.0, rs.getDouble("METRIC_SUM"));
+        assertEquals("METRIC_COUNT", 2, rs.getLong("METRIC_COUNT"));
         assertEquals("METRIC_MAX", 4.0, rs.getDouble("METRIC_MAX"));
         assertEquals("METRIC_MIN", 0.0, rs.getDouble("METRIC_MIN"));
       } else if ("disk_free".equals(rs.getString("METRIC_NAME"))) {
         assertEquals("APP_ID", "test_app", rs.getString("APP_ID"));
-        assertEquals("METRIC_SUM", 4.0, rs.getDouble("METRIC_SUM"));
-        assertEquals("METRIC_COUNT", 8, rs.getLong("METRIC_COUNT"));
+        assertEquals("METRIC_SUM", 1.0, rs.getDouble("METRIC_SUM"));
+        assertEquals("METRIC_COUNT", 2, rs.getLong("METRIC_COUNT"));
         assertEquals("METRIC_MAX", 1.0, rs.getDouble("METRIC_MAX"));
         assertEquals("METRIC_MIN", 1.0, rs.getDouble("METRIC_MIN"));
       }
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
index 9873643..75b3f91 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
@@ -124,14 +125,14 @@
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");
@@ -198,7 +199,7 @@
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -260,7 +261,7 @@
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -309,14 +310,14 @@
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
index 78db11d..6541b2c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
@@ -31,6 +31,7 @@
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
index b243e0b..c62fd34 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataManager.java
@@ -69,6 +69,7 @@
     metric2.setStartTime(now - 1000);
     metric2.setAppId("dummy_app2");
     metric2.setType("Integer");
+    metric2.setInstanceId("instance2");
     metric2.setMetricValues(new TreeMap<Long, Double>() {{
       put(now - 100, 1.0);
       put(now - 200, 2.0);
@@ -144,5 +145,12 @@
     Assert.assertEquals("dummy_app1", savedHostData.get("dummy_host1").iterator().next());
     Assert.assertEquals("dummy_app2", savedHostData.get("dummy_host2").iterator().next());
     Assert.assertEquals("dummy_app3", cachedHostData.get("dummy_host3").iterator().next());
+
+    Map<String, Set<String>> cachedHostInstanceData = metadataManager.getHostedInstanceCache();
+    Map<String, Set<String>> savedHostInstanceData = metadataManager.getHostedInstancesFromStore();
+    Assert.assertEquals(cachedHostInstanceData.size(), savedHostInstanceData.size());
+    Assert.assertEquals("dummy_host2", cachedHostInstanceData.get("instance2").iterator().next());
   }
 }
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
index 5eab903..181abca 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TestMetadataSync.java
@@ -56,9 +56,15 @@
       put("h2", new HashSet<>(Arrays.asList("a1", "a2")));
     }};
 
+    Map<String, Set<String>> hostedInstances = new HashMap<String, Set<String>>() {{
+      put("i1", new HashSet<>(Arrays.asList("h1")));
+      put("i2", new HashSet<>(Arrays.asList("h1", "h2")));
+    }};
+
     expect(configuration.get("timeline.metrics.service.operation.mode", "")).andReturn("distributed");
     expect(hBaseAccessor.getTimelineMetricMetadata()).andReturn(metadata);
     expect(hBaseAccessor.getHostedAppsMetadata()).andReturn(hostedApps);
+    expect(hBaseAccessor.getInstanceHostsMetdata()).andReturn(hostedInstances);
 
     replay(configuration, hBaseAccessor);
 
@@ -80,6 +86,12 @@
     Assert.assertEquals(2, hostedApps.size());
     Assert.assertEquals(1, hostedApps.get("h1").size());
     Assert.assertEquals(2, hostedApps.get("h2").size());
+
+    hostedInstances = metadataManager.getHostedInstanceCache();
+    Assert.assertEquals(2, hostedInstances.size());
+    Assert.assertEquals(1, hostedInstances.get("i1").size());
+    Assert.assertEquals(2, hostedInstances.get("i2").size());
   }
 
   @Test
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
index 2d88912..02f9574 100644
--- a/ambari-metrics/pom.xml
+++ b/ambari-metrics/pom.xml
@@ -33,6 +33,7 @@
     <module>ambari-metrics-host-monitoring</module>
     <module>ambari-metrics-grafana</module>
     <module>ambari-metrics-assembly</module>
+    <module>ambari-metrics-host-aggregator</module>
   </modules>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/ambari-server/conf/unix/install-helper.sh b/ambari-server/conf/unix/install-helper.sh
index 8def27a..6d7b3c7 100644
--- a/ambari-server/conf/unix/install-helper.sh
+++ b/ambari-server/conf/unix/install-helper.sh
@@ -33,6 +33,9 @@
 SIMPLEJSON_SERVER_DIR="${ROOT}/usr/lib/ambari-server/lib/ambari_simplejson"
 AMBARI_PROPERTIES="${ROOT}/etc/ambari-server/conf/ambari.properties"
 AMBARI_ENV_RPMSAVE="${ROOT}/var/lib/ambari-server/ambari-env.sh.rpmsave" # this turns into ambari-env.sh during ambari-server start
+AMBARI_SERVER_KEYS_FOLDER="${ROOT}/var/lib/ambari-server/keys"
+AMBARI_SERVER_KEYS_DB_FOLDER="${ROOT}/var/lib/ambari-server/keys/db"
+AMBARI_SERVER_NEWCERTS_FOLDER="${ROOT}/var/lib/ambari-server/keys/db/newcerts"
 
 PYTHON_WRAPER_DIR="${ROOT}/usr/bin/"
 PYTHON_WRAPER_TARGET="${PYTHON_WRAPER_DIR}/ambari-python-wrap"
@@ -83,7 +86,7 @@
   rm -f "$PYTHON_WRAPER_TARGET"
 
   AMBARI_PYTHON=""
-  python_binaries=( "/usr/bin/python" "/usr/bin/python2" "/usr/bin/python2.7", "/usr/bin/python2.6" )
+  python_binaries=( "/usr/bin/python" "/usr/bin/python2" "/usr/bin/python2.7" "/usr/bin/python2.6" )
   for python_binary in "${python_binaries[@]}"
   do
     $python_binary -c "import sys ; ver = sys.version_info ; sys.exit(not (ver >= (2,6) and ver<(3,0)))" 1>/dev/null 2>/dev/null
@@ -126,6 +129,20 @@
 	$AUTOSTART_SERVER_CMD
   fi
 
+  if [ -d "$AMBARI_SERVER_KEYS_FOLDER" ]
+  then
+      chmod 700 "$AMBARI_SERVER_KEYS_FOLDER"
+      if [ -d "$AMBARI_SERVER_KEYS_DB_FOLDER" ]
+      then
+          chmod 700 "$AMBARI_SERVER_KEYS_DB_FOLDER"
+          if [ -d "$AMBARI_SERVER_NEWCERTS_FOLDER" ]
+          then
+              chmod 700 "$AMBARI_SERVER_NEWCERTS_FOLDER"
+          fi
+      fi
+  fi
+
   if [ -f "$AMBARI_ENV_RPMSAVE" ] ; then
     PYTHON_PATH_LINE='export PYTHONPATH=$PYTHONPATH:/usr/lib/python2.6/site-packages'
     grep "^$PYTHON_PATH_LINE\$" "$AMBARI_ENV_RPMSAVE" > /dev/null
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index ba97f88..c0aed7b 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -322,7 +322,6 @@
             <exclude>pass.txt</exclude>
             <exclude>src/test/resources/version</exclude>
             <exclude>src/test/resources/users.ldif</exclude>
-            <exclude>src/test/resources/gsInstaller-hosts.txt</exclude>
             <exclude>src/test/resources/temporal_ganglia_data.txt</exclude>
             <exclude>src/test/resources/users.ldif</exclude>
             <exclude>src/test/resources/mpacks_replay.log</exclude>
@@ -775,6 +774,19 @@
               <goal>exec</goal>
             </goals>
           </execution>
+          <execution>
+            <configuration>
+              <executable>${project.basedir}/src/main/sh/azuredb_create_generator.sh</executable>
+              <arguments>
+                <argument>${project.basedir}</argument>
+              </arguments>
+            </configuration>
+            <id>azuredb-gen</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
         </executions>
       </plugin>
       <plugin>
@@ -1118,7 +1130,7 @@
     <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
-      <version>1.4</version>
+      <version>2.4</version>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index 43053fb..53e65fd 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -177,6 +177,7 @@
       <excludes>
 	    <exclude>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</exclude>
         <exclude>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
+        <exclude>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
 	  </excludes>
     </fileSet>
     <fileSet>
@@ -186,6 +187,7 @@
       <includes>
 	    <include>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
         <include>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
+        <include>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
 	  </includes>
     </fileSet>
     <fileSet>
@@ -334,6 +336,11 @@
     </file>
     <file>
       <fileMode>755</fileMode>
+      <source>target/classes/Ambari-DDL-AzureDB-CREATE.sql</source>
+      <outputDirectory>/var/lib/ambari-server/resources</outputDirectory>
+    </file>
+    <file>
+      <fileMode>755</fileMode>
       <source>target/classes/Ambari-DDL-SQLServer-CREATE.sql</source>
       <outputDirectory>/var/lib/ambari-server/resources</outputDirectory>
     </file>
diff --git a/ambari-server/src/main/java/org/apache/ambari/annotations/TransactionalLock.java b/ambari-server/src/main/java/org/apache/ambari/annotations/TransactionalLock.java
index a8b89a9..7c8485c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/annotations/TransactionalLock.java
+++ b/ambari-server/src/main/java/org/apache/ambari/annotations/TransactionalLock.java
@@ -60,7 +60,7 @@
    * The area that the lock is being applied to. There is exactly 1
    * {@link ReadWriteLock} for every area defined.
    */
-  public enum LockArea {
+  enum LockArea {
     /**
      * Joinpoint lock around work performed on caching the host role command
      * status in a given stage and request.
@@ -89,7 +89,7 @@
      *
      * @param configurationProperty
      */
-    private LockArea(String configurationProperty) {
+    LockArea(String configurationProperty) {
       m_configurationProperty = configurationProperty;
     }
 
@@ -134,7 +134,7 @@
   /**
    * The type of lock which should be acquired.
    */
-  public enum LockType {
+  enum LockType {
     /**
      * Read Lock.
      */
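
The modifier changes in this file are purely cosmetic: members of an annotation type, like interface members, are implicitly public, and an enum constructor is implicitly private (it cannot be public or protected at all), so the removed keywords were redundant. The same holds for the interface methods de-qualified in ActionDBAccessor further down. A minimal illustration that compiles as-is:

    // Nested types of an @interface are implicitly public, and the enum
    // constructor below is implicitly private; adding either keyword back
    // changes nothing.
    @interface Sample {
      enum Mode {
        ON("on");

        private final String key;

        Mode(String key) {  // implicitly private
          this.key = key;
        }
      }
    }
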
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
index b0550c0..47a74b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
@@ -32,12 +32,12 @@
   /**
    * Given an action id of the form requestId-stageId, retrieve the Stage
    */
-  public Stage getStage(String actionId);
+  Stage getStage(String actionId);
 
   /**
    * Get all stages associated with a single request id
    */
-  public List<Stage> getAllStages(long requestId);
+  List<Stage> getAllStages(long requestId);
 
   /**
    * Gets the request entity by id.  Will not load the entire
@@ -62,12 +62,12 @@
    *
    * Returns the list of the aborted operations.
    */
-  public Collection<HostRoleCommandEntity> abortOperation(long requestId);
+  Collection<HostRoleCommandEntity> abortOperation(long requestId);
 
   /**
    * Mark the task as to have timed out
    */
-  public void timeoutHostRole(String host, long requestId, long stageId, String role);
+  void timeoutHostRole(String host, long requestId, long stageId, String role);
 
   /**
    * Mark the task as to have timed out
@@ -88,7 +88,7 @@
    *
    * @see HostRoleStatus#IN_PROGRESS_STATUSES
    */
-  public List<Stage> getFirstStageInProgressPerRequest();
+  List<Stage> getFirstStageInProgressPerRequest();
 
   /**
    * Returns all the pending stages in a request, including queued and not-queued. A stage is
@@ -101,14 +101,14 @@
    *
    * @see HostRoleStatus#IN_PROGRESS_STATUSES
    */
-  public List<Stage> getStagesInProgressForRequest(Long requestId);
+  List<Stage> getStagesInProgressForRequest(Long requestId);
 
   /**
    * Gets the number of commands in progress.
    *
    * @return the number of commands in progress.
    */
-  public int getCommandsInProgressCount();
+  int getCommandsInProgressCount();
 
   /**
    * Persists all tasks for a given request
@@ -135,19 +135,19 @@
   /**
    * For the given host, update all the tasks based on the command report
    */
-  public void updateHostRoleState(String hostname, long requestId,
-                                  long stageId, String role, CommandReport report);
+  void updateHostRoleState(String hostname, long requestId,
+                           long stageId, String role, CommandReport report);
 
   /**
    * Mark the task as to have been aborted
    */
-  public void abortHostRole(String host, long requestId, long stageId, String role);
+  void abortHostRole(String host, long requestId, long stageId, String role);
 
   /**
    * Mark the task as to have been aborted. Reason should be specified manually.
    */
-  public void abortHostRole(String host, long requestId, long stageId,
-                            String role, String reason);
+  void abortHostRole(String host, long requestId, long stageId,
+                     String role, String reason);
 
   /**
    * Return the last persisted Request ID as seen when the DBAccessor object
@@ -156,7 +156,7 @@
    *
    * @return Request Id seen at init time
    */
-  public long getLastPersistedRequestIdWhenInitialized();
+  long getLastPersistedRequestIdWhenInitialized();
 
   /**
    * Bulk update scheduled commands
@@ -171,37 +171,37 @@
   /**
    * Updates scheduled stage.
    */
-  public void hostRoleScheduled(Stage s, String hostname, String roleStr);
+  void hostRoleScheduled(Stage s, String hostname, String roleStr);
 
   /**
    * Given a request id, get all the tasks that belong to this request
    */
-  public List<HostRoleCommand> getRequestTasks(long requestId);
+  List<HostRoleCommand> getRequestTasks(long requestId);
 
   /**
    * Given a list of request ids, get all the tasks that belong to these requests
    */
-  public List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds);
+  List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds);
 
   /**
    * Given a list of task ids, get all the host role commands
    */
-  public Collection<HostRoleCommand> getTasks(Collection<Long> taskIds);
+  Collection<HostRoleCommand> getTasks(Collection<Long> taskIds);
 
   /**
    * Get a List of host role commands where the host, role and status are as specified
    */
-  public List<HostRoleCommand> getTasksByHostRoleAndStatus(String hostname, String role, HostRoleStatus status);
+  List<HostRoleCommand> getTasksByHostRoleAndStatus(String hostname, String role, HostRoleStatus status);
 
   /**
    * Get a List of host role commands where the role and status are as specified
    */
-  public List<HostRoleCommand> getTasksByRoleAndStatus(String role, HostRoleStatus status);
+  List<HostRoleCommand> getTasksByRoleAndStatus(String role, HostRoleStatus status);
 
   /**
    * Gets the host role command corresponding to the task id
    */
-  public HostRoleCommand getTask(long taskId);
+  HostRoleCommand getTask(long taskId);
 
   /**
    * Get first or last maxResults requests that are in the specified status
@@ -215,28 +215,28 @@
    * @return First or last maxResults request id's if ascOrder is true or false,
    *         respectively
    */
-  public List<Long> getRequestsByStatus(RequestStatus status, int maxResults, boolean ascOrder);
+  List<Long> getRequestsByStatus(RequestStatus status, int maxResults, boolean ascOrder);
 
   /**
    * Gets request contexts associated with the list of request id
    */
-  public Map<Long, String> getRequestContext(List<Long> requestIds);
+  Map<Long, String> getRequestContext(List<Long> requestIds);
 
   /**
    * Gets the request context associated with the request id
    */
-  public String getRequestContext(long requestId);
+  String getRequestContext(long requestId);
 
   /**
    * Gets request objects by ids
    */
-  public List<Request> getRequests(Collection<Long> requestIds);
+  List<Request> getRequests(Collection<Long> requestIds);
 
   /**
    * Resubmits a series of tasks
    * @param taskIds
    */
-  public void resubmitTasks(List<Long> taskIds);
+  void resubmitTasks(List<Long> taskIds);
 
 
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
index 8c4eae8..4b7e71d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
@@ -377,6 +377,7 @@
       stageDAO.create(stageEntity);
 
       List<HostRoleCommand> orderedHostRoleCommands = stage.getOrderedHostRoleCommands();
+      List<HostRoleCommandEntity> hostRoleCommandEntities = new ArrayList<>();
 
       for (HostRoleCommand hostRoleCommand : orderedHostRoleCommands) {
         hostRoleCommand.setRequestId(requestId);
@@ -384,8 +385,8 @@
         HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity();
         hostRoleCommandEntity.setStage(stageEntity);
         hostRoleCommandDAO.create(hostRoleCommandEntity);
+        hostRoleCommandEntities.add(hostRoleCommandEntity);
 
-        assert hostRoleCommandEntity.getTaskId() != null;
         hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId());
 
         String prefix = "";
@@ -442,6 +443,7 @@
         roleSuccessCriteriaDAO.create(roleSuccessCriteriaEntity);
       }
 
+      stageEntity.setHostRoleCommands(hostRoleCommandEntities);
       stageEntity = stageDAO.merge(stageEntity);
     }
 
@@ -516,6 +518,7 @@
     long now = System.currentTimeMillis();
 
     List<Long> requestsToCheck = new ArrayList<>();
+    List<Long> abortedCommandUpdates = new ArrayList<>();
 
     List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByPKs(taskReports.keySet());
     List<HostRoleCommandEntity> commandEntitiesToMerge = new ArrayList<>();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
index 398bc9d..13cdce1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
@@ -77,8 +77,8 @@
     scheduler.stop();
   }
 
-  public void sendActions(List<Stage> stages, ExecuteActionRequest actionRequest) throws AmbariException {
-    Request request = requestFactory.createNewFromStages(stages, actionRequest);
+  public void sendActions(List<Stage> stages, String clusterHostInfo, ExecuteActionRequest actionRequest) throws AmbariException {
+    Request request = requestFactory.createNewFromStages(stages, clusterHostInfo, actionRequest);
     sendActions(request, actionRequest);
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
index 316f2bd..d3157e2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
@@ -465,9 +465,10 @@
 
         //Schedule what we have so far
 
         for (ExecutionCommand cmd : commandsToSchedule) {
           ConfigHelper.processHiddenAttribute(cmd.getConfigurations(), cmd.getConfigurationAttributes(), cmd.getRole(), false);
-          processHostRole(stage, cmd, commandsToStart, commandsToUpdate);
+          processHostRole(request, stage, cmd, commandsToStart, commandsToUpdate);
         }
 
         LOG.debug("==> Commands to start: {}", commandsToStart.size());
@@ -1090,7 +1091,7 @@
     return serviceEventMap;
   }
 
-  private void processHostRole(Stage s, ExecutionCommand cmd, List<ExecutionCommand> commandsToStart,
+  private void processHostRole(RequestEntity r, Stage s, ExecutionCommand cmd, List<ExecutionCommand> commandsToStart,
                                List<ExecutionCommand> commandsToUpdate)
     throws AmbariException {
     long now = System.currentTimeMillis();
@@ -1106,23 +1107,23 @@
     }
     s.setLastAttemptTime(hostname, roleStr, now);
     s.incrementAttemptCount(hostname, roleStr);
-    /** change the hostname in the command for the host itself **/
-    cmd.setHostname(hostsMap.getHostMap(hostname));
 
 
-    //Try to get clusterHostInfo from cache
+    String requestPK = r.getRequestId().toString();
     String stagePk = s.getStageId() + "-" + s.getRequestId();
-    Map<String, Set<String>> clusterHostInfo = clusterHostInfoCache.getIfPresent(stagePk);
+
+    // Try to get clusterHostInfo from cache
+    Map<String, Set<String>> clusterHostInfo = clusterHostInfoCache.getIfPresent(requestPK);
 
     if (clusterHostInfo == null) {
       Type type = new TypeToken<Map<String, Set<String>>>() {}.getType();
-      clusterHostInfo = StageUtils.getGson().fromJson(s.getClusterHostInfo(), type);
-      clusterHostInfoCache.put(stagePk, clusterHostInfo);
+      clusterHostInfo = StageUtils.getGson().fromJson(r.getClusterHostInfo(), type);
+      clusterHostInfoCache.put(requestPK, clusterHostInfo);
     }
 
     cmd.setClusterHostInfo(clusterHostInfo);
 
-    //Try to get commandParams from cache and merge them with command-level parameters
+    // Try to get commandParams from cache and merge them with command-level parameters
     Map<String, String> commandParams = commandParamsStageCache.getIfPresent(stagePk);
 
     if (commandParams == null){
@@ -1143,10 +1144,10 @@
         }
       }
     } catch (ClusterNotFoundException cnfe) {
-      //NOP
+      // NOP
     }
 
-    //Try to get hostParams from cache and merge them with command-level parameters
+    // Try to get hostParams from cache and merge them with command-level parameters
     Map<String, String> hostParams = hostParamsStageCache.getIfPresent(stagePk);
     if (hostParams == null) {
       Type type = new TypeToken<Map<String, String>>() {}.getType();
@@ -1157,6 +1158,8 @@
     hostParamsCmd.putAll(hostParams);
     cmd.setHostLevelParams(hostParamsCmd);
 
+    // change the hostname in the command for the host itself
+    cmd.setHostname(hostsMap.getHostMap(hostname));
 
     commandsToUpdate.add(cmd);
   }
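
The hunk above rekeys the clusterHostInfo cache from stage to request, matching the move of the serialized host info onto RequestEntity: one deserialization per request instead of one per stage. A hedged sketch of the lookup pattern; the Guava cache construction parameters here are illustrative, not the ones ActionScheduler uses:

    import java.lang.reflect.Type;
    import java.util.Map;
    import java.util.Set;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    public class RequestHostInfoCacheSketch {
      // Construction parameters are illustrative only.
      private final Cache<String, Map<String, Set<String>>> cache =
          CacheBuilder.newBuilder().maximumSize(100).build();
      private final Gson gson = new Gson();

      Map<String, Set<String>> hostInfoFor(long requestId, String clusterHostInfoJson) {
        String key = Long.toString(requestId);  // one entry per request, not per stage
        Map<String, Set<String>> info = cache.getIfPresent(key);
        if (info == null) {
          Type type = new TypeToken<Map<String, Set<String>>>() {}.getType();
          info = gson.fromJson(clusterHostInfoJson, type);
          cache.put(key, info);
        }
        return info;
      }
    }
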
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
index 87a6edf..bd354d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
@@ -69,6 +69,7 @@
   private String customCommandName;
   private ExecutionCommandWrapper executionCommandWrapper;
   private boolean isBackgroundCommand = false;
+  private String opsDisplayName;
 
   @Inject
   private ExecutionCommandDAO executionCommandDAO;
@@ -179,6 +180,7 @@
     roleCommand = hostRoleCommandEntity.getRoleCommand();
     event = new ServiceComponentHostEventWrapper(hostRoleCommandEntity.getEvent());
     commandDetail = hostRoleCommandEntity.getCommandDetail();
+    opsDisplayName = hostRoleCommandEntity.getOpsDisplayName();
     customCommandName = hostRoleCommandEntity.getCustomCommandName();
     isBackgroundCommand = hostRoleCommandEntity.isBackgroundCommand();
   }
@@ -202,6 +204,7 @@
     hostRoleCommandEntity.setAutoSkipOnFailure(autoSkipFailure);
     hostRoleCommandEntity.setRoleCommand(roleCommand);
     hostRoleCommandEntity.setCommandDetail(commandDetail);
+    hostRoleCommandEntity.setOpsDisplayName(opsDisplayName);
     hostRoleCommandEntity.setCustomCommandName(customCommandName);
     hostRoleCommandEntity.setBackgroundCommand(isBackgroundCommand);
 
@@ -294,6 +297,13 @@
     this.commandDetail = commandDetail;
   }
 
+  public String getOpsDisplayName() {
+    return opsDisplayName;
+  }
+
+  public void setOpsDisplayName(String opsDisplayName) {
+    this.opsDisplayName = opsDisplayName;
+  }
+
   public String getCustomCommandName() {
     return customCommandName;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
index 10e0d57..baf67fe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
@@ -58,6 +58,7 @@
   private long createTime;
   private long startTime;
   private long endTime;
+  private String clusterHostInfo;
 
   /**
    * If true, this request cannot be executed in parallel with any other
@@ -94,6 +95,7 @@
     this.startTime = -1;
     this.endTime = -1;
     this.exclusive = false;
+    this.clusterHostInfo = "{}";
 
     if (-1L != this.clusterId) {
       try {
@@ -110,7 +112,7 @@
    * Construct a new entity from the provided stages
    */
   //TODO remove when not needed
-  public Request(@Assisted Collection<Stage> stages, Clusters clusters){
+  public Request(@Assisted Collection<Stage> stages, @Assisted String clusterHostInfo, Clusters clusters){
     if (stages != null && !stages.isEmpty()) {
       this.stages.addAll(stages);
       Stage stage = stages.iterator().next();
@@ -129,6 +131,7 @@
       this.createTime = System.currentTimeMillis();
       this.startTime = -1;
       this.endTime = -1;
+      this.clusterHostInfo = clusterHostInfo;
       this.requestType = RequestType.INTERNAL_REQUEST;
       this.exclusive = false;
     } else {
@@ -143,9 +146,9 @@
    * Construct a new entity from the provided stages
    */
   //TODO remove when not needed
-  public Request(@Assisted Collection<Stage> stages, @Assisted ExecuteActionRequest actionRequest,
+  public Request(@Assisted Collection<Stage> stages, @Assisted String clusterHostInfo, @Assisted ExecuteActionRequest actionRequest,
                  Clusters clusters, Gson gson) throws AmbariException {
-    this(stages, clusters);
+    this(stages, clusterHostInfo, clusters);
     if (actionRequest != null) {
       this.resourceFilters = actionRequest.getResourceFilters();
       this.operationLevel = actionRequest.getOperationLevel();
@@ -183,6 +186,7 @@
     this.exclusive = entity.isExclusive();
     this.requestContext = entity.getRequestContext();
     this.inputs = entity.getInputs();
+    this.clusterHostInfo = entity.getClusterHostInfo();
 
     this.requestType = entity.getRequestType();
     this.commandName = entity.getCommandName();
@@ -245,6 +249,7 @@
     requestEntity.setRequestScheduleId(requestScheduleId);
     requestEntity.setStatus(status);
     requestEntity.setDisplayStatus(displayStatus);
+    requestEntity.setClusterHostInfo(clusterHostInfo);
     //TODO set all fields
 
     if (resourceFilters != null) {
@@ -281,6 +286,13 @@
     return requestEntity;
   }
 
+  public String getClusterHostInfo() {
+    return clusterHostInfo;
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo;
+  }
 
   public Long getClusterId() {
     return Long.valueOf(clusterId);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
index bc0223c..8a22796 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestFactory.java
@@ -30,9 +30,9 @@
 
   Request createNew(long requestId, @Assisted("clusterId") Long clusterName) throws AmbariException;
 
-  Request createNewFromStages(Collection<Stage> stages);
+  Request createNewFromStages(Collection<Stage> stages, String clusterHostInfo);
 
-  Request createNewFromStages(Collection<Stage> stages, ExecuteActionRequest actionRequest);
+  Request createNewFromStages(Collection<Stage> stages, String clusterHostInfo, ExecuteActionRequest actionRequest);
 
   Request createExisting(RequestEntity entity);
 
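
Editor's sketch: the extra String clusterHostInfo argument added above threads through Guice assisted injection; each factory-method argument is matched to an @Assisted constructor slot, while the remaining dependencies come from the injector. The module and names below are illustrative assumptions, not Ambari's actual binding.

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.FactoryModuleBuilder;

class AssistedInjectSketch {
  interface GreeterFactory {
    // like createNewFromStages above, every argument maps to an @Assisted slot
    Greeter create(@Assisted("name") String name, @Assisted("greeting") String greeting);
  }

  static class Greeter {
    private final String name;
    private final String greeting;

    @Inject
    Greeter(@Assisted("name") String name, @Assisted("greeting") String greeting) {
      this.name = name;
      this.greeting = greeting;
    }

    String greet() {
      return greeting + ", " + name;
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        // generates a GreeterFactory implementation at runtime
        install(new FactoryModuleBuilder().build(GreeterFactory.class));
      }
    });
    GreeterFactory factory = injector.getInstance(GreeterFactory.class);
    System.out.println(factory.create("Ambari", "Hello").greet()); // prints: Hello, Ambari
  }
}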
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
index a46e6a2..5295536 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
@@ -76,7 +76,6 @@
   private final String requestContext;
   private HostRoleStatus status = HostRoleStatus.PENDING;
   private HostRoleStatus displayStatus = HostRoleStatus.PENDING;
-  private String clusterHostInfo;
   private String commandParamsStage;
   private String hostParamsStage;
 
@@ -110,7 +109,6 @@
       @Assisted("clusterName") @Nullable String clusterName,
       @Assisted("clusterId") long clusterId,
       @Assisted("requestContext") @Nullable String requestContext,
-      @Assisted("clusterHostInfo") String clusterHostInfo,
       @Assisted("commandParamsStage") String commandParamsStage,
       @Assisted("hostParamsStage") String hostParamsStage,
       HostRoleCommandFactory hostRoleCommandFactory, ExecutionCommandWrapperFactory ecwFactory) {
@@ -120,7 +118,6 @@
     this.clusterName = clusterName;
     this.clusterId = clusterId;
     this.requestContext = requestContext == null ? "" : requestContext;
-    this.clusterHostInfo = clusterHostInfo;
     this.commandParamsStage = commandParamsStage;
     this.hostParamsStage = hostParamsStage;
 
@@ -155,7 +152,6 @@
     }
 
     requestContext = stageEntity.getRequestContext();
-    clusterHostInfo = stageEntity.getClusterHostInfo();
     commandParamsStage = stageEntity.getCommandParamsStage();
     hostParamsStage = stageEntity.getHostParamsStage();
     commandExecutionType = stageEntity.getCommandExecutionType();
@@ -197,7 +193,6 @@
     stageEntity.setRequestContext(requestContext);
     stageEntity.setHostRoleCommands(new ArrayList<HostRoleCommandEntity>());
     stageEntity.setRoleSuccessCriterias(new ArrayList<RoleSuccessCriteriaEntity>());
-    stageEntity.setClusterHostInfo(clusterHostInfo);
     stageEntity.setCommandParamsStage(commandParamsStage);
     stageEntity.setHostParamsStage(hostParamsStage);
     stageEntity.setCommandExecutionType(commandExecutionType);
@@ -264,14 +259,6 @@
     return commandsToScheduleSet;
   }
 
-  public String getClusterHostInfo() {
-    return clusterHostInfo;
-  }
-
-  public void setClusterHostInfo(String clusterHostInfo) {
-    this.clusterHostInfo = clusterHostInfo;
-  }
-
   public String getCommandParamsStage() {
     return commandParamsStage;
   }
@@ -811,7 +798,7 @@
   /**
    * This method should be used only in stage planner. To add
    * a new execution command use
-   * {@link #addHostRoleExecutionCommand(String, org.apache.ambari.server.Role, org.apache.ambari.server.RoleCommand, org.apache.ambari.server.state.ServiceComponentHostEvent, String, String, boolean)}
+   * {@link #addHostRoleExecutionCommand(String, Role, RoleCommand, ServiceComponentHostEvent, String, String, boolean, boolean)}
    * @param origStage the stage
    * @param hostname  the hostname; {@code null} for a server-side stage
    * @param r         the role
@@ -935,7 +922,6 @@
     builder.append("clusterName=").append(clusterName).append("\n");
     builder.append("logDir=").append(logDir).append("\n");
     builder.append("requestContext=").append(requestContext).append("\n");
-    builder.append("clusterHostInfo=").append(clusterHostInfo).append("\n");
     builder.append("commandParamsStage=").append(commandParamsStage).append("\n");
     builder.append("hostParamsStage=").append(hostParamsStage).append("\n");
     builder.append("status=").append(status).append("\n");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
index a88558c..0d1a326 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
@@ -29,7 +29,6 @@
       @Assisted("clusterName") String clusterName,
       @Assisted("clusterId") long clusterId,
       @Assisted("requestContext") String requestContext,
-      @Assisted("clusterHostInfo") String clusterHostInfo,
       @Assisted("commandParamsStage") String commandParamsStage,
       @Assisted("hostParamsStage") String hostParamsStage);
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
index 3cad82d..0827639 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactoryImpl.java
@@ -43,7 +43,6 @@
    * @param clusterName Cluster name
    * @param clusterId Cluster ID
    * @param requestContext Information about the context of the request
-   * @param clusterHostInfo Information about the host
    * @param commandParamsStage Information about the command parameters
    * @param hostParamsStage Information about the host parameters for the stage
    * @return An instance of a Stage with the provided params.
@@ -54,10 +53,9 @@
                          @Assisted("clusterName") String clusterName,
                          @Assisted("clusterId") long clusterId,
                          @Assisted("requestContext") String requestContext,
-                         @Assisted("clusterHostInfo") String clusterHostInfo,
                          @Assisted("commandParamsStage") String commandParamsStage,
                          @Assisted("hostParamsStage") String hostParamsStage) {
-    return new Stage(requestId, logDir, clusterName, clusterId, requestContext, clusterHostInfo, commandParamsStage, hostParamsStage,
+    return new Stage(requestId, logDir, clusterName, clusterId, requestContext, commandParamsStage, hostParamsStage,
         injector.getInstance(HostRoleCommandFactory.class),
         injector.getInstance(ExecutionCommandWrapperFactory.class));
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
index 5591ae8..68e1734 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
@@ -28,12 +28,6 @@
   private String msg;
   private String status;
 
-  /**
-   * A String declaring the component's security state
-   *
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  private String securityState;
   private String sendExecCmdDet = "False";
 
   private String serviceName;
@@ -74,26 +68,6 @@
     this.status = status;
   }
 
-  /**
-   * Gets the relevant component's security state.
-   *
-   * @return a String declaring this component's security state
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  public String getSecurityState() {
-    return securityState;
-  }
-
-  /**
-   * Sets the relevant component's security state.
-   *
-   * @param securityState a String declaring this component's security state
-   * @see org.apache.ambari.server.state.SecurityState
-   */
-  public void setSecurityState(String securityState) {
-    this.securityState = securityState;
-  }
-
   public String getStackVersion() {
     return stackVersion;
   }
@@ -158,7 +132,7 @@
   @Override
   public String toString() {
     return "ComponentStatus [componentName=" + componentName + ", msg=" + msg
-        + ", status=" + status + ", securityState=" + securityState
+        + ", status=" + status
         + ", serviceName=" + serviceName + ", clusterName=" + clusterName
         + ", stackVersion=" + stackVersion + ", configurationTags="
         + configurationTags + ", extra=" + extra + "]";
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 0d1ef13..8c726a0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -398,7 +398,7 @@
    * Contains key name strings. These strings are used inside maps
    * encapsulated inside the command.
    */
-  public static interface KeyNames {
+  public interface KeyNames {
     String COMMAND_TIMEOUT = "command_timeout";
     String SCRIPT = "script";
     String SCRIPT_TYPE = "script_type";
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
index 43c484c..0cfc68d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
@@ -138,9 +138,6 @@
   /**
    * Gets the alert definition commands that contain the alert definitions for
    * each cluster that the host is a member of.
-   *
-   * @param commands
-   *          the commands, or {@code null} for none.
    */
   public List<AlertDefinitionCommand> getAlertDefinitionCommands() {
     return alertDefinitionCommands;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index 8cd2804..c1028dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -56,7 +56,6 @@
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostHealthStatus;
 import org.apache.ambari.server.state.MaintenanceState;
-import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
@@ -617,25 +616,6 @@
                 }
               }
 
-              SecurityState prevSecurityState = scHost.getSecurityState();
-              SecurityState currentSecurityState = SecurityState.valueOf(status.getSecurityState());
-              if((prevSecurityState != currentSecurityState)) {
-                if(prevSecurityState.isEndpoint()) {
-                  scHost.setSecurityState(currentSecurityState);
-                  LOG.info(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s",
-                      componentName, status.getServiceName(), status.getClusterName(), prevSecurityState,
-                      currentSecurityState, hostname));
-                }
-                else {
-                  LOG.debug(String.format("Security of service component %s of service %s of cluster %s " +
-                          "has changed from %s to %s on host %s but will be ignored since %s is a " +
-                          "transitional state",
-                      componentName, status.getServiceName(), status.getClusterName(),
-                      prevSecurityState, currentSecurityState, hostname, prevSecurityState));
-                }
-              }
-
               if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
                 scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
               }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/AlertRunnable.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/AlertRunnable.java
index ea583e4..057a273 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/alerts/AlertRunnable.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/AlertRunnable.java
@@ -27,6 +27,7 @@
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.commons.lang.math.NumberUtils;
@@ -162,4 +163,30 @@
     Number number = NumberUtils.createNumber((String) value);
     return number.intValue();
   }
+
+  /**
+   * Builds an {@link Alert} instance.
+   *
+   * @param cluster
+   *          the cluster the alert is for (not {@code null}).
+   * @param myDefinition
+   *          the alert's definition (not {@code null}).
+   * @param alertState
+   *          the state of the alert (not {@code null}).
+   * @param message
+   *          the alert text.
+   * @return an alert.
+   */
+  protected Alert buildAlert(Cluster cluster, AlertDefinitionEntity myDefinition,
+      AlertState alertState, String message) {
+    Alert alert = new Alert(myDefinition.getDefinitionName(), null, myDefinition.getServiceName(),
+        myDefinition.getComponentName(), null, alertState);
+
+    alert.setLabel(myDefinition.getLabel());
+    alert.setText(message);
+    alert.setTimestamp(System.currentTimeMillis());
+    alert.setCluster(cluster.getClusterName());
+
+    return alert;
+  }
 }
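
Editor's sketch of the new buildAlert(...) extension point: a hypothetical AlertRunnable subclass only computes a state and a message, and the helper fills in the definition name, service, component, label, and timestamp. The class below is invented for illustration and assumes the same package so the package-private execute(...) can be overridden.

package org.apache.ambari.server.alerts;

import java.util.Collections;
import java.util.List;

import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
import org.apache.ambari.server.state.Alert;
import org.apache.ambari.server.state.AlertState;
import org.apache.ambari.server.state.Cluster;

public class AlwaysOkAlertRunnable extends AlertRunnable {

  public AlwaysOkAlertRunnable(String definitionName) {
    super(definitionName);
  }

  @Override
  List<Alert> execute(Cluster cluster, AlertDefinitionEntity definition) {
    // buildAlert copies name/service/component/label from the definition,
    // so the subclass only supplies the state and the message text
    return Collections.singletonList(
        buildAlert(cluster, definition, AlertState.OK, "Nothing to report."));
  }
}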
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java
index 8faa2a1..a35e6fd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/AmbariPerformanceRunnable.java
@@ -221,8 +221,8 @@
      *          the default value to use if the definition does not have a
     *          critical threshold parameter.
      */
-    private PerformanceArea(String label, String warningParameter, int defaultWarningThreshold,
-        String criticalParameter, int defaultCriticalThreshold) {
+    PerformanceArea(String label, String warningParameter, int defaultWarningThreshold,
+                    String criticalParameter, int defaultCriticalThreshold) {
       m_label = label;
       m_warningParameter = warningParameter;
       m_defaultWarningThreshold = defaultWarningThreshold;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
new file mode 100644
index 0000000..7dfbe47
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.alerts;
+
+import java.text.MessageFormat;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.inject.Inject;
+
+/**
+ * The {@link ComponentVersionAlertRunnable} is used to determine whether the
+ * reported versions of host components match what is expected. If there is a
+ * mismatch, then an alert will be triggered indicating which components are in
+ * need of attention.
+ * <p/>
+ * This alert will not run during upgrades or when the cluster is still being
+ * provisioned.
+ */
+public class ComponentVersionAlertRunnable extends AlertRunnable {
+
+  /**
+   * The message for the alert when all components are reporting correct
+   * versions.
+   */
+  private static final String ALL_COMPONENTS_CORRECT_MSG = "All components are reporting their expected versions.";
+
+  /**
+   * The message for the alert when there is an upgrade in progress.
+   */
+  private static final String UPGRADE_IN_PROGRESS_MSG = "This alert will be suspended while the upgrade to {0} is in progress.";
+
+  /**
+   * The unknown component error message.
+   */
+  private static final String UNKNOWN_COMPONENT_MSG_TEMPLATE = "Unable to retrieve component information for {0}/{1}";
+
+  /**
+   * The version mismatch message.
+   */
+  private static final String MISMATCHED_VERSIONS_MSG = "The following components are reporting unexpected versions: ";
+
+  /**
+   * The message when there is no CURRENT cluster version, but the cluster is
+   * still being set up.
+   */
+  private static final String CLUSTER_PROVISIONING_MSG = "The cluster is currently being provisioned. This alert will be skipped.";
+
+  /**
+   * The message when there is no CURRENT cluster version.
+   */
+  private static final String CLUSTER_OUT_OF_SYNC_MSG = "The cluster's CURRENT version could not be determined.";
+
+  @Inject
+  private AmbariMetaInfo m_metaInfo;
+
+  /**
+   * Constructor.
+   *
+   * @param definitionName the name of the alert definition being executed
+   */
+  public ComponentVersionAlertRunnable(String definitionName) {
+    super(definitionName);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  List<Alert> execute(Cluster cluster, AlertDefinitionEntity myDefinition) {
+    // if there is an upgrade in progress, then skip running this alert
+    UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+    if (null != upgrade) {
+      String message = MessageFormat.format(UPGRADE_IN_PROGRESS_MSG, upgrade.getToVersion());
+
+      return Collections.singletonList(
+          buildAlert(cluster, myDefinition, AlertState.SKIPPED, message));
+    }
+
+    TreeMap<Host, Set<ServiceComponentHost>> versionMismatches = new TreeMap<>();
+    Collection<Host> hosts = cluster.getHosts();
+
+    // a missing CURRENT cluster version is critical unless the cluster is still provisioning
+    ClusterVersionEntity clusterVersionEntity = cluster.getCurrentClusterVersion();
+    if (null == clusterVersionEntity) {
+      if (cluster.getProvisioningState() == State.INIT
+          || cluster.getAllClusterVersions().size() == 1) {
+        return Collections.singletonList(
+            buildAlert(cluster, myDefinition, AlertState.SKIPPED, CLUSTER_PROVISIONING_MSG));
+      } else {
+        return Collections.singletonList(
+            buildAlert(cluster, myDefinition, AlertState.CRITICAL, CLUSTER_OUT_OF_SYNC_MSG));
+      }
+    }
+
+    RepositoryVersionEntity repositoryVersionEntity = clusterVersionEntity.getRepositoryVersion();
+    String clusterVersion = repositoryVersionEntity.getVersion();
+
+    for (Host host : hosts) {
+      List<ServiceComponentHost> hostComponents = cluster.getServiceComponentHosts(
+          host.getHostName());
+      for (ServiceComponentHost hostComponent : hostComponents) {
+        StackId desiredStackId = hostComponent.getDesiredStackVersion();
+
+        final ComponentInfo componentInfo;
+        try {
+          componentInfo = m_metaInfo.getComponent(desiredStackId.getStackName(),
+              desiredStackId.getStackVersion(), hostComponent.getServiceName(),
+              hostComponent.getServiceComponentName());
+        } catch (AmbariException ambariException) {
+          // return an UNKNOWN alert if the component info cannot be loaded
+          String message = MessageFormat.format(UNKNOWN_COMPONENT_MSG_TEMPLATE,
+              hostComponent.getServiceName(), hostComponent.getServiceComponentName());
+
+          return Collections.singletonList(
+              buildAlert(cluster, myDefinition, AlertState.UNKNOWN, message));
+        }
+
+        // skip components that don't advertise a version
+        if (!componentInfo.isVersionAdvertised()) {
+          continue;
+        }
+
+        String version = hostComponent.getVersion();
+        if (!StringUtils.equals(version, clusterVersion)) {
+          Set<ServiceComponentHost> mismatchedComponents = versionMismatches.get(host);
+          if (null == mismatchedComponents) {
+            mismatchedComponents = new HashSet<>();
+            versionMismatches.put(host, mismatchedComponents);
+          }
+
+          mismatchedComponents.add(hostComponent);
+        }
+      }
+    }
+
+    AlertState alertState = AlertState.OK;
+    String alertText = ALL_COMPONENTS_CORRECT_MSG;
+
+    // if there are any components reporting the wrong version, fire off a warning
+    if (!versionMismatches.isEmpty()) {
+      StringBuilder buffer = new StringBuilder(MISMATCHED_VERSIONS_MSG);
+      buffer.append(System.lineSeparator());
+
+      for (Host host : versionMismatches.keySet()) {
+        buffer.append("  ").append(host.getHostName());
+        buffer.append(System.lineSeparator());
+        for (ServiceComponentHost hostComponent : versionMismatches.get(host)) {
+          buffer.append("    ").append(hostComponent.getServiceComponentName()).append(": ").append(
+              hostComponent.getVersion()).append(System.lineSeparator());
+        }
+      }
+
+      alertText = buffer.toString();
+      alertState = AlertState.WARNING;
+    }
+
+    return Collections.singletonList(buildAlert(cluster, myDefinition, alertState, alertText));
+  }
+}
\ No newline at end of file
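
Editor's note: the mismatch bookkeeping above uses the classic get-then-put idiom to group components by host. A hedged sketch of the same grouping with computeIfAbsent (Java 8+), using invented sample types; the diff's code itself is unchanged.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class GroupingSketch {
  static Map<String, Set<String>> groupMismatchesByHost(Map<String, String> componentToHost) {
    Map<String, Set<String>> mismatchesByHost = new HashMap<>();
    for (Map.Entry<String, String> entry : componentToHost.entrySet()) {
      // one set of mismatched components per host, created lazily on first use
      mismatchesByHost.computeIfAbsent(entry.getValue(), host -> new HashSet<>())
          .add(entry.getKey());
    }
    return mismatchesByHost;
  }
}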
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java
index 9e2d923..1c7a220 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java
@@ -31,5 +31,5 @@
    * @param request the request to handle
    * @return the result of the request
    */
-  public Result handleRequest(Request request);
+  Result handleRequest(Request request);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
index 206f267..6412799 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
@@ -43,47 +43,47 @@
    *
    * @return a list of expressions after merging.  Do not return any null elements.
    */
-  public List<Expression> merge(Expression left, Expression right, int precedence);
+  List<Expression> merge(Expression left, Expression right, int precedence);
 
 
   /**
    * Get the predicate representation of the expression.
    * @return a predicate instance for the expression
    */
-  public Predicate toPredicate() throws InvalidQueryException;
+  Predicate toPredicate() throws InvalidQueryException;
 
   /**
    * Set the expression's left operand.
    *
    * @param left  the left operand
    */
-  public void setLeftOperand(T left);
+  void setLeftOperand(T left);
 
   /**
    * Set the expression's right operand.
    *
    * @param right  the right operand
    */
-  public void setRightOperand(T right);
+  void setRightOperand(T right);
 
   /**
    * Get the left operand expression.
    *
    * @return the left operand
    */
-  public T getLeftOperand();
+  T getLeftOperand();
 
   /**
    * Get the right operand expression.
    *
    * @return the right operand.
    */
-  public T getRightOperand();
+  T getRightOperand();
 
   /**
    * Get the expression operator.
    *
    * @return the logical operator for the expression
    */
-  public Operator getOperator();
+  Operator getOperator();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
index 8881aad..80e9a69 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
@@ -31,5 +31,5 @@
    * @param right  right operand
    * @return a predicate instance for this operator
    */
-  public Predicate toPredicate(Predicate left, Predicate right);
+  Predicate toPredicate(Predicate left, Predicate right);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
index f9c3acc..a1c0622 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
@@ -26,7 +26,7 @@
   /**
    * Operator types.
    */
-  public enum TYPE {
+  enum TYPE {
     LESS,
     LESS_EQUAL,
     GREATER,
@@ -44,14 +44,14 @@
   /**
    * The highest base operator precedence level.
    */
-  public static final int MAX_OP_PRECEDENCE = 3;
+  int MAX_OP_PRECEDENCE = 3;
 
   /**
    * Get the operator type.
    *
    * @return the operator type
    */
-  public TYPE getType();
+  TYPE getType();
 
   /**
    * Obtain the precedence of the operator.
@@ -60,5 +60,5 @@
    *
    * @return  the precedence of this operator in its current context
    */
-  public int getPrecedence();
+  int getPrecedence();
 }
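
Editor's note: the modifier removals in this and the surrounding interface files are behavior-neutral. Per the JLS, interface methods are implicitly public, interface fields are implicitly public static final, nested types in interfaces are implicitly static, and enum constructors with no access modifier are private. A compact illustration:

interface ModifierSketch {
  int MAX_OP_PRECEDENCE = 3; // same as: public static final int MAX_OP_PRECEDENCE = 3;

  TYPE getType();            // same as: public TYPE getType();

  enum TYPE {                // same as: public static enum TYPE { ... }
    LESS,
    GREATER;

    TYPE() {}                // same as: private TYPE() {}
  }
}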
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
index cbcba0c..f1d2fb4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
@@ -33,5 +33,5 @@
    * @return  a predicate instance for this operator.
    * @throws  InvalidQueryException if unable to build the predicate because of invalid operands
    */
-  public Predicate toPredicate(String prop, String val) throws InvalidQueryException;
+  Predicate toPredicate(String prop, String val) throws InvalidQueryException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
index 6d76945..dffc73b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaSortBuilder.java
@@ -56,7 +56,7 @@
    * Builds the list of sort orders based on the supplied request and JPA
    * predicate visitor.
    *
-   * @param sortRequests
+   * @param sortRequest
    *          the Ambari sort request properties to turn into a JPA sort
    *          request. If {@code null} or the {@link SortRequestProperty} list
    *          is null, an empty list is returned.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
index 8ac38ac..dc9b82c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
@@ -45,7 +45,7 @@
    * @param propertyId    the property id
    * @param temporalInfo  temporal information for the property
    */
-  public void addProperty(String propertyId, TemporalInfo temporalInfo);
+  void addProperty(String propertyId, TemporalInfo temporalInfo);
 
   /**
    * Add a local (not sub-resource) property to the query.
@@ -54,7 +54,7 @@
    * @param property the property id which contains the group, property name
    *                 and whether the property is temporal
    */
-  public void addLocalProperty(String property);
+  void addLocalProperty(String property);
 
   /**
    * Obtain the properties of the query.
@@ -63,7 +63,7 @@
    *
    * @return the query properties
    */
-  public Set<String> getProperties();
+  Set<String> getProperties();
 
   /**
    * Execute the query.
@@ -75,7 +75,7 @@
    * @throws NoSuchResourceException the query didn't match any resources
    * @throws NoSuchParentResourceException a specified parent resource doesn't exist
    */
-  public Result execute()
+  Result execute()
       throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException;
 
   /**
@@ -84,7 +84,7 @@
    *
    * @return the predicate used to identify the associated resource
    */
-  public Predicate getPredicate();
+  Predicate getPredicate();
 
   /**
    * Set the user-provided predicate on this query.
@@ -92,21 +92,21 @@
    *
    * @param predicate  the user provided predicate
    */
-  public void setUserPredicate(Predicate predicate);
+  void setUserPredicate(Predicate predicate);
 
   /**
    * Set the page request information for this query.
    *
    * @param pageRequest  the page request information
    */
-  public void setPageRequest(PageRequest pageRequest);
+  void setPageRequest(PageRequest pageRequest);
 
   /**
    * Set the order request information on the query
    *
    * @param sortRequest the ordering info
    */
-  public void setSortRequest(SortRequest sortRequest);
+  void setSortRequest(SortRequest sortRequest);
 
   /**
    * Set the corresponding renderer.
@@ -115,7 +115,7 @@
    *
    * @param renderer  renderer for the query
    */
-  public void setRenderer(Renderer renderer);
+  void setRenderer(Renderer renderer);
 
   /**
    * Set this Query's requestInfoProperties from the original request.  This will contain information
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/Renderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/Renderer.java
index b71d96f..0eea056 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/Renderer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/Renderer.java
@@ -43,7 +43,7 @@
    *
    * @param schemaFactory  factory of schema instances
    */
-  public void init(SchemaFactory schemaFactory);
+  void init(SchemaFactory schemaFactory);
 
   /**
    * Finalize which properties are requested by the query.
@@ -57,8 +57,8 @@
    *
    * @return tree of sets of string properties for each query including any sub-queries
    */
-  public TreeNode<Set<String>> finalizeProperties(
-      TreeNode<QueryInfo> queryProperties, boolean isCollection);
+  TreeNode<Set<String>> finalizeProperties(
+    TreeNode<QueryInfo> queryProperties, boolean isCollection);
 
   /**
    * Finalize the query results.
@@ -67,7 +67,7 @@
    *
    * @return result in the format dictated by the renderer
    */
-  public Result finalizeResult(Result queryResult);
+  Result finalizeResult(Result queryResult);
 
   /**
    * Obtain the associated post processor.
@@ -79,7 +79,7 @@
    *
    * @return associated post processor
    */
-  public ResultPostProcessor getResultPostProcessor(Request request);
+  ResultPostProcessor getResultPostProcessor(Request request);
 
 
   /**
@@ -89,5 +89,5 @@
    * @return true if property provider support is required
    *         false if property provider support is not required
    */
-  public boolean requiresPropertyProviderInput();
+  boolean requiresPropertyProviderInput();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
index 99bcd03..4983920 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
@@ -132,8 +132,7 @@
               || name.contains("zero_padding")) {
       return new MetricsPaddingRenderer(name);
     } else {
-      throw new IllegalArgumentException("Invalid renderer name: " + name +
-          " for resource of type: " + m_type);
+      throw new IllegalArgumentException("Invalid renderer name for resource of type " + m_type);
     }
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
index 6de5e25..dfbefd7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestScheduleResourceDefinition.java
@@ -25,11 +25,7 @@
 import org.apache.ambari.server.controller.spi.Resource;
 
 public class RequestScheduleResourceDefinition extends BaseResourceDefinition {
-  /**
-   * Constructor.
-   *
-   * @param resourceType resource type
-   */
+
   public RequestScheduleResourceDefinition() {
     super(Resource.Type.RequestSchedule);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java
index 3695807..ca4357f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java
@@ -33,14 +33,14 @@
    *
    * @param keyValueMap  map of all parent foreign keys. Map from resource type to id value.
    */
-  public void setKeyValueMap(Map<Resource.Type, String> keyValueMap);
+  void setKeyValueMap(Map<Resource.Type, String> keyValueMap);
 
   /**
    * Obtain the primary and foreign key properties for the resource.
    *
    * @return map of primary and foreign key values keyed by resource type
    */
-  public Map<Resource.Type, String> getKeyValueMap();
+  Map<Resource.Type, String> getKeyValueMap();
 
   /**
    * Return the query associated with the resource.
@@ -48,7 +48,7 @@
    *
    * @return the associated query
    */
-  public Query getQuery();
+  Query getQuery();
 
   /**
    * Return the resource definition for this resource type.
@@ -57,7 +57,7 @@
    *
    * @return  the associated resource definition
    */
-  public ResourceDefinition getResourceDefinition();
+  ResourceDefinition getResourceDefinition();
 
   /**
    * Return all sub-resource instances.
@@ -66,12 +66,12 @@
    *
    * @return all sub-resource instances
    */
-  public Map<String, ResourceInstance> getSubResources();
+  Map<String, ResourceInstance> getSubResources();
 
   /**
    * Determine if resource is a collection resource.
    *
    * @return true if the resource is a collection resource; false otherwise
    */
-  public boolean isCollectionResource();
+  boolean isCollectionResource();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java
index 0b5859b..0b154fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java
@@ -35,5 +35,5 @@
    *
    * @return  a new resource instance of the specified type
    */
-  public ResourceInstance createResource(Resource.Type type, Map<Resource.Type, String> mapIds);
+  ResourceInstance createResource(Resource.Type type, Map<Resource.Type, String> mapIds);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
index 4aad530..3250a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
@@ -100,7 +100,7 @@
   /**
    * Gets a list of bootstrapped hosts.
    *
-   * @param info  the host info, with no SSL key information
+   * @param uriInfo the host info, with no SSL key information
    */
   @GET
   @Path("/hosts")
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
index d3f88ec..fb5423b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AlertDefinitionService.java
@@ -99,9 +99,6 @@
 
   /**
    * Create a request schedule resource instance
-   * @param clusterName
-   * @param requestScheduleId
-   * @return
    */
   private ResourceInstance createResourceInstance(String clusterName,
       Long definitionId) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java
index ab25bb2..9bba025 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java
@@ -29,5 +29,5 @@
    *
    * @param result the result to process.
    */
-  public void process(Result result);
+  void process(Result result);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java
index 6284879..1e0e24a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java
@@ -26,7 +26,7 @@
   /**
    * STATUS enum. Maps a status to a status code.
    */
-  public static enum STATUS { OK(200, "OK", false), CREATED(201, "Created", false), ACCEPTED(202, "Accepted", false),
+  public enum STATUS { OK(200, "OK", false), CREATED(201, "Created", false), ACCEPTED(202, "Accepted", false),
     CONFLICT(409, "Resource Conflict", true), NOT_FOUND(404, "Not Found", true), BAD_REQUEST(400, "Bad Request", true),
     UNAUTHORIZED(401, "Unauthorized", true), FORBIDDEN(403, "Forbidden", true),
     SERVER_ERROR(500, "Internal Server Error", true);
@@ -53,7 +53,7 @@
      * @param description  description
      * @param isErrorState whether this is an error state
      */
-    private STATUS(int code, String description, boolean isErrorState) {
+    STATUS(int code, String description, boolean isErrorState) {
       m_code = code;
       m_desc = description;
       m_isErrorState = isErrorState;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
index dc16b3e..49465c5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
@@ -30,27 +30,27 @@
   /**
    * RequestInfo category path.
    */
-  public static final String REQUEST_INFO_PATH = "RequestInfo";
+  String REQUEST_INFO_PATH = "RequestInfo";
 
   /**
    * Slash symbol
    */
-  public static final String SLASH = "/";
+  String SLASH = "/";
 
   /**
    * Category path to ignore parsing of the child node
    */
-  public static final String REQUEST_BLOB_TITLE = "RequestBodyInfo";
+  String REQUEST_BLOB_TITLE = "RequestBodyInfo";
 
   /**
    * Name of the query property which may exist under REQUEST_INFO_PATH.
    */
-  public static final String QUERY_FIELD_NAME = "query";
+  String QUERY_FIELD_NAME = "query";
 
   /**
    * Path to the body object.
    */
-  public static final String BODY_TITLE = "Body";
+  String BODY_TITLE = "Body";
 
   /**
    * Parse the provided string into request bodies based on the
@@ -60,5 +60,5 @@
    *
    * @return a set of {@link RequestBody} instances
    */
-  public Set<RequestBody> parse(String body) throws BodyParseException;
+  Set<RequestBody> parse(String body) throws BodyParseException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java
index 9a8b677..4cc4fa0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java
@@ -45,7 +45,7 @@
    * @throws NoSuchParentResourceException
    * @throws SystemException
    */
-  public RequestStatus create(ResourceInstance resource, RequestBody requestBody)
+  RequestStatus create(ResourceInstance resource, RequestBody requestBody)
       throws UnsupportedPropertyException,
              ResourceAlreadyExistsException,
              NoSuchParentResourceException,
@@ -62,7 +62,7 @@
    * @throws NoSuchParentResourceException
    * @throws NoSuchResourceException
    */
-  public RequestStatus update(ResourceInstance resource, RequestBody requestBody)
+  RequestStatus update(ResourceInstance resource, RequestBody requestBody)
       throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException;
 
   /**
@@ -76,6 +76,6 @@
    * @throws NoSuchParentResourceException
    * @throws NoSuchResourceException
    */
-  public RequestStatus delete(ResourceInstance resource, RequestBody requestBody)
+  RequestStatus delete(ResourceInstance resource, RequestBody requestBody)
       throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
index b7eca71..7731b22 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
@@ -35,7 +35,6 @@
 import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
-import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.slf4j.Logger;
@@ -75,13 +74,13 @@
   /**
    * Recommend configurations by the stack advisor, then store the results in cluster topology.
    * @param clusterTopology cluster topology instance
-   * @param existingConfigurations Existing configurations of cluster
+   * @param userProvidedConfigurations user-provided cluster configurations from the Blueprint and cluster template
    */
-  public void adviseConfiguration(ClusterTopology clusterTopology, Map<String, Map<String, String>> existingConfigurations) throws ConfigurationTopologyException {
+  public void adviseConfiguration(ClusterTopology clusterTopology, Map<String, Map<String, String>> userProvidedConfigurations) throws ConfigurationTopologyException {
     StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, StackAdvisorRequestType.CONFIGURATIONS);
     try {
       RecommendationResponse response = stackAdvisorHelper.recommend(request);
-      addAdvisedConfigurationsToTopology(response, clusterTopology, existingConfigurations);
+      addAdvisedConfigurationsToTopology(response, clusterTopology, userProvidedConfigurations);
     } catch (StackAdvisorException e) {
       throw new ConfigurationTopologyException(RECOMMENDATION_FAILED, e);
     } catch (IllegalArgumentException e) {
@@ -94,7 +93,7 @@
     Map<String, Set<String>> hgComponentsMap = gatherHostGroupComponents(clusterTopology);
     Map<String, Set<String>> hgHostsMap = gatherHostGroupBindings(clusterTopology);
     Map<String, Set<String>> componentHostsMap = gatherComponentsHostsMap(hgComponentsMap,
-      hgHostsMap);
+            hgHostsMap);
     return StackAdvisorRequest.StackAdvisorRequestBuilder
       .forStack(stack.getName(), stack.getVersion())
       .forServices(new ArrayList<>(clusterTopology.getBlueprint().getServices()))
@@ -167,7 +166,7 @@
   }
 
   private void addAdvisedConfigurationsToTopology(RecommendationResponse response,
-                                                  ClusterTopology topology, Map<String, Map<String, String>> existingConfigurations) {
+                                                  ClusterTopology topology, Map<String, Map<String, String>> userProvidedConfigurations) {
     Preconditions.checkArgument(response.getRecommendations() != null,
       "Recommendation response is empty.");
     Preconditions.checkArgument(response.getRecommendations().getBlueprint() != null,
@@ -175,71 +174,41 @@
     Preconditions.checkArgument(response.getRecommendations().getBlueprint().getConfigurations() != null,
       "Configurations are missing from the recommendation blueprint response.");
 
-    Map<String, Map<String, String>> userProvidedProperties = existingConfigurations;
-    if (topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY) {
-      userProvidedProperties = getUserProvidedProperties(topology, existingConfigurations);
-    }
-
     Map<String, BlueprintConfigurations> recommendedConfigurations =
       response.getRecommendations().getBlueprint().getConfigurations();
+    Blueprint blueprint = topology.getBlueprint();
+
     for (Map.Entry<String, BlueprintConfigurations> configEntry : recommendedConfigurations.entrySet()) {
       String configType = configEntry.getKey();
-      BlueprintConfigurations blueprintConfig = filterBlueprintConfig(configType, configEntry.getValue(),
-        userProvidedProperties, topology);
-      topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration(
-              blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
+      // add a recommended config type only if its related service is present in the Blueprint
+      if (blueprint.isValidConfigType(configType)) {
+        BlueprintConfigurations blueprintConfig = filterBlueprintConfig(configType, configEntry.getValue(),
+                userProvidedConfigurations, topology);
+        topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration(
+                blueprintConfig.getProperties(), blueprintConfig.getPropertyAttributes()));
+      }
     }
   }
 
   /**
-   * Gather user defined properties. (keep that only which is not included in the stack defaults or it overrides the stack default value)
-   */
-  private Map<String, Map<String, String>> getUserProvidedProperties(ClusterTopology topology, Map<String, Map<String, String>> existingConfigurations) {
-    Map<String, Map<String, String>> userProvidedProperties = Maps.newHashMap();
-    Blueprint blueprint = topology.getBlueprint();
-    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
-    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
-
-    for (Map.Entry<String, Map<String, String>> configGroup : existingConfigurations.entrySet()) {
-      String configType = configGroup.getKey();
-      Map<String, String> configsToAdd = Maps.newHashMap();
-      for (Map.Entry<String, String> configProp : configGroup.getValue().entrySet()) {
-        if (stackDefaultProps.containsKey(configType) && stackDefaultProps.get(configType).containsKey(configProp.getKey())) {
-          String originalValue = stackDefaultProps.get(configType).get(configProp.getKey());
-          if (originalValue != null && !originalValue.equals(configProp.getValue())) {
-            configsToAdd.put(configProp.getKey(), configProp.getValue());
-          }
-        } else {
-          configsToAdd.put(configProp.getKey(), configProp.getValue());
-        }
-      }
-      if (!configsToAdd.isEmpty()) {
-        userProvidedProperties.put(configGroup.getKey(), configsToAdd);
-      }
-    }
-
-    return userProvidedProperties;
-  }
-
-  /**
-   * Remove user defined properties from stack advisor output in case of ONLY_STACK_DEFAULTS_APPLY or
+   * Remove user-defined properties from the Stack Advisor output when the strategy is ONLY_STACK_DEFAULTS_APPLY or
    * ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES.
    */
   private BlueprintConfigurations filterBlueprintConfig(String configType, BlueprintConfigurations config,
-                                                        Map<String, Map<String, String>> userProvidedProperties,
+                                                        Map<String, Map<String, String>> userProvidedConfigurations,
                                                         ClusterTopology topology) {
     if (topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY ||
       topology.getConfigRecommendationStrategy() == ConfigRecommendationStrategy
         .ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES) {
-      if (userProvidedProperties.containsKey(configType)) {
+      if (userProvidedConfigurations.containsKey(configType)) {
         BlueprintConfigurations newConfig = new BlueprintConfigurations();
         Map<String, String> filteredProps = Maps.filterKeys(config.getProperties(),
-          Predicates.not(Predicates.in(userProvidedProperties.get(configType).keySet())));
+          Predicates.not(Predicates.in(userProvidedConfigurations.get(configType).keySet())));
         newConfig.setProperties(Maps.newHashMap(filteredProps));
 
         if (config.getPropertyAttributes() != null) {
           Map<String, ValueAttributesInfo> filteredAttributes = Maps.filterKeys(config.getPropertyAttributes(),
-            Predicates.not(Predicates.in(userProvidedProperties.get(configType).keySet())));
+            Predicates.not(Predicates.in(userProvidedConfigurations.get(configType).keySet())));
           newConfig.setPropertyAttributes(Maps.newHashMap(filteredAttributes));
         }
         return newConfig;
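
Editor's sketch of the Guava idiom filterBlueprintConfig relies on: recommended properties whose keys the user already provided are filtered out, so user values are never overridden. The sample data and names below are invented; Maps.filterKeys returns a live view, hence the defensive copy, matching the Maps.newHashMap(filteredProps) call above.

import java.util.Map;
import java.util.Set;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

class FilterSketch {
  static Map<String, String> dropUserProvided(Map<String, String> recommended,
                                              Set<String> userProvidedKeys) {
    // keep only keys NOT in the user-provided set; copy into a HashMap since
    // Maps.filterKeys returns a view backed by the original map
    return Maps.newHashMap(
        Maps.filterKeys(recommended, Predicates.not(Predicates.in(userProvidedKeys))));
  }

  public static void main(String[] args) {
    Map<String, String> recommended = ImmutableMap.of("heap.size", "4g", "data.dir", "/data");
    Set<String> userProvided = ImmutableSet.of("heap.size");
    // prints {data.dir=/data}: the user's heap.size wins over the recommendation
    System.out.println(dropUserProvided(recommended, userProvided));
  }
}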
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
index f5677c1..1f3e96e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
@@ -200,7 +200,7 @@
 
     private String type;
 
-    private StackAdvisorRequestType(String type) {
+    StackAdvisorRequestType(String type) {
       this.type = type;
     }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommandType.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommandType.java
index 5aa1ea9..317b848 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommandType.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommandType.java
@@ -35,7 +35,7 @@
 
   private final String name;
 
-  private StackAdvisorCommandType(String name) {
+  StackAdvisorCommandType(String name) {
     this.name = name;
   }
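
Both constructor changes above only drop a redundant modifier: enum constructors are implicitly private in Java (and cannot be public or protected at all), so the explicit keyword adds noise. A tiny self-contained illustration:

    public enum SketchCommandType {
      RECOMMEND("recommend"),
      VALIDATE("validate");

      private final String name;

      // Implicitly private; an explicit "private" here would be redundant.
      SketchCommandType(String name) {
        this.name = name;
      }

      @Override
      public String toString() {
        return name;
      }
    }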
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java b/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java
index 60d365f..144132f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java
@@ -29,42 +29,42 @@
    *
    * @return the parent node or null if this node is the root
    */
-  public TreeNode<T> getParent();
+  TreeNode<T> getParent();
 
   /**
    * Obtain the list of child nodes.
    *
    * @return a list of child nodes or an empty list if a leaf node
    */
-  public Collection<TreeNode<T>> getChildren();
+  Collection<TreeNode<T>> getChildren();
 
   /**
    * Obtain the object associated with this node.
    *
    * @return the object associated with this node or null
    */
-  public T getObject();
+  T getObject();
 
   /**
    * Obtain the name of the node.
    *
    * @return the name of the node or null
    */
-  public String getName();
+  String getName();
 
   /**
    * Set the name of the node.
    *
    * @param name the name to set
    */
-  public void setName(String name);
+  void setName(String name);
 
   /**
    * Set the parent node.
    *
    * @param parent the parent node to set
    */
-  public void setParent(TreeNode<T> parent);
+  void setParent(TreeNode<T> parent);
 
   /**
    * Add a child node for the provided object.
@@ -73,7 +73,7 @@
    * @param name  the name of the child node
    * @return the newly created child node
    */
-  public TreeNode<T> addChild(T child, String name);
+  TreeNode<T> addChild(T child, String name);
 
   /**
    * Add the specified child node.
@@ -81,7 +81,7 @@
    * @param child the child node to add
    * @return the added child node
    */
-  public TreeNode<T> addChild(TreeNode<T> child);
+  TreeNode<T> addChild(TreeNode<T> child);
 
   /**
    * Remove a child from this resource.
@@ -90,7 +90,7 @@
    *
    * @return the previous value associated with key, or null if there was no mapping for key
    */
-  public TreeNode<T> removeChild(String name);
+  TreeNode<T> removeChild(String name);
 
   /**
    * Set a property on the node.
@@ -98,7 +98,7 @@
    * @param name  the name of the property
    * @param value the value of the property
    */
-  public void setProperty(String name, Object value);
+  void setProperty(String name, Object value);
 
   /**
    * Get the specified node property.
@@ -106,7 +106,7 @@
    * @param name property name
    * @return the requested property value or null
    */
-  public Object getProperty(String name);
+  Object getProperty(String name);
 
   /**
    * Get the specified node property as a String.
@@ -114,14 +114,14 @@
    * @param name property name
    * @return the requested property value (as a String) or null
    */
-  public String getStringProperty(String name);
+  String getStringProperty(String name);
 
   /**
    * Remove a property from the node.
    *
    * @param name  name of property to be removed
    */
-  public void removeProperty(String name);
+  void removeProperty(String name);
 
   /**
    * Find a child node by name.
@@ -132,5 +132,5 @@
    *
    * @return the requested node or null if the child was not found
    */
-  public TreeNode<T> getChild(String name);
+  TreeNode<T> getChild(String name);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
index cfd55bb..733a07a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
@@ -188,8 +188,7 @@
     // Startup a scheduled executor service to look through the logs
     ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
     BSStatusCollector statusCollector = new BSStatusCollector();
-    ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(statusCollector,
-        0, 10, TimeUnit.SECONDS);
+    ScheduledFuture<?> handle = null;
     LOG.info("Kicking off the scheduler for polling on logs in " +
     this.requestIdDir);
     String user = sshHostInfo.getUser();
@@ -208,6 +207,8 @@
     String scriptlog = "";
     try {
       createRunDir();
+      handle = scheduler.scheduleWithFixedDelay(statusCollector,
+        0, 10, TimeUnit.SECONDS);
       if (LOG.isDebugEnabled()) {
         // FIXME needs to be removed later
         // security hole
@@ -357,7 +358,9 @@
       } catch (InterruptedException e) {
         throw new IOException(e);
       } finally {
-        handle.cancel(true);
+        if (handle != null) {
+          handle.cancel(true);
+        }
         /* schedule a last update */
         scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
         scheduler.shutdownNow();
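
The change above defers scheduling the log poller until after createRunDir() succeeds, so a setup failure can no longer leave a polling task running against a missing directory; the handle is then null-checked before cancellation. A minimal sketch of the pattern (the setup step and timings are placeholders):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class DeferredPollingSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        ScheduledFuture<?> handle = null; // nothing scheduled until setup succeeds
        try {
          // ... setup that may throw, e.g. creating a run directory ...
          handle = scheduler.scheduleWithFixedDelay(
              () -> System.out.println("polling logs..."), 0, 10, TimeUnit.SECONDS);
          Thread.sleep(100); // simulated work
        } finally {
          if (handle != null) { // setup may have failed before anything was scheduled
            handle.cancel(true);
          }
          scheduler.shutdownNow();
        }
      }
    }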
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
index dd1fa3c..707c756 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/AbstractCheckDescriptor.java
@@ -22,6 +22,8 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -30,16 +32,19 @@
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.PrereqCheckType;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -121,6 +126,7 @@
    * @throws org.apache.ambari.server.AmbariException
    *           if server error happens
    */
+  @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
   public boolean isApplicable(PrereqCheckRequest request, List<String> requiredServices, boolean requiredAll) throws AmbariException {
     final Cluster cluster = clustersProvider.get().getCluster(request.getClusterName());
     Set<String> services = cluster.getServices().keySet();
@@ -138,6 +144,25 @@
       }
     }
 
+    // !!! service is found and deployed - now check if it is part of the VDF
+    if (serviceFound && null != request.getTargetStackId()) {
+      String stackName = request.getTargetStackId().getStackName();
+      RepositoryVersionEntity rve = repositoryVersionDaoProvider.get().
+        findByStackNameAndVersion(stackName, request.getRepositoryVersion());
+
+      if (rve != null && RepositoryType.STANDARD != rve.getType()) {
+        try {
+          Set<String> availableServices = rve.getRepositoryXml().getAvailableServiceNames();
+
+          if (!CollectionUtils.containsAny(availableServices, requiredServices)) {
+            serviceFound = false;
+          }
+        } catch (Exception e) {
+          LOG.warn("Could not parse xml for %s", request.getRepositoryVersion(), e);
+        }
+      }
+    }
+
     return serviceFound;
   }
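
The new block narrows applicability for non-STANDARD (e.g. patch or maintenance) repositories: even when a required service is deployed in the cluster, the check only applies if the version definition file lists at least one of the required services. A hedged sketch of that intersection test using the same commons-collections call (service names hypothetical):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.commons.collections.CollectionUtils;

    public class VdfServiceCheckSketch {
      public static void main(String[] args) {
        // Services named in a hypothetical version definition file
        Set<String> availableServices = new HashSet<>(Arrays.asList("HDFS", "ZOOKEEPER"));
        // Services this prerequisite check requires
        List<String> requiredServices = Arrays.asList("HIVE", "TEZ");

        // true only if the two collections share at least one element
        boolean applicable = CollectionUtils.containsAny(availableServices, requiredServices);
        System.out.println(applicable); // false -> the check would be skipped
      }
    }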
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index e7e9433..7c40c83 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -24,6 +24,7 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -46,9 +47,12 @@
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
+import org.apache.ambari.server.orm.dao.StageDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
@@ -81,6 +85,9 @@
   private static Connection connection;
   private static AmbariMetaInfo ambariMetaInfo;
   private static DBAccessor dbAccessor;
+  private static HostRoleCommandDAO hostRoleCommandDAO;
+  private static ExecutionCommandDAO executionCommandDAO;
+  private static StageDAO stageDAO;
 
   private static DatabaseConsistencyCheckResult checkResult = DatabaseConsistencyCheckResult.DB_CHECK_SUCCESS;
 
@@ -174,6 +181,7 @@
       checkHostComponentStates();
       checkServiceConfigs();
       checkTopologyTables();
+      checkForLargeTables();
       LOG.info("******************************* Check database completed *******************************");
       return checkResult;
     }
@@ -223,6 +231,115 @@
   }
 
   /**
+   * This method checks whether any Ambari database table exceeds a size limit.
+   * It first tries to read the table size from the schema information; if that
+   * is not possible, it falls back to counting the table rows and comparing the
+   * count against a row-count limit.
+   */
+  static void checkForLargeTables() {
+    LOG.info("Checking for tables with large physical size");
+
+    ensureConnection();
+
+    DBAccessor.DbType dbType = dbAccessor.getDbType();
+    String schemaName = dbAccessor.getDbSchema();
+
+    String GET_TABLE_SIZE_IN_BYTES_POSTGRESQL = "SELECT pg_total_relation_size('%s') \"Table Size\"";
+    String GET_TABLE_SIZE_IN_BYTES_MYSQL = "SELECT (data_length + index_length) \"Table Size\" FROM information_schema.TABLES WHERE table_schema = \"" + schemaName + "\" AND table_name =\"%s\"";
+    String GET_TABLE_SIZE_IN_BYTES_ORACLE = "SELECT bytes \"Table Size\" FROM user_segments WHERE segment_type='TABLE' AND segment_name='%s'";
+    String GET_ROW_COUNT_QUERY = "SELECT COUNT(*) FROM %s";
+
+    Map<DBAccessor.DbType, String> tableSizeQueryMap = new HashMap<>();
+    tableSizeQueryMap.put(DBAccessor.DbType.POSTGRES, GET_TABLE_SIZE_IN_BYTES_POSTGRESQL);
+    tableSizeQueryMap.put(DBAccessor.DbType.MYSQL, GET_TABLE_SIZE_IN_BYTES_MYSQL);
+    tableSizeQueryMap.put(DBAccessor.DbType.ORACLE, GET_TABLE_SIZE_IN_BYTES_ORACLE);
+
+    List<String> tablesToCheck = Arrays.asList("host_role_command", "execution_command", "stage", "request", "alert_history");
+
+    final double TABLE_SIZE_LIMIT_MB = 3000.0;
+    final int TABLE_ROW_COUNT_LIMIT = 3000000;
+
+    String findTableSizeQuery = tableSizeQueryMap.get(dbType);
+
+    if (dbType == DBAccessor.DbType.ORACLE) {
+      for (int i = 0; i < tablesToCheck.size(); i++) {
+        tablesToCheck.set(i, tablesToCheck.get(i).toUpperCase());
+      }
+    }
+
+    for (String tableName : tablesToCheck) {
+
+      ResultSet rs = null;
+      Statement statement = null;
+      Double tableSizeInMB = null;
+      Long tableSizeInBytes = null;
+      int tableRowCount = -1;
+
+      try {
+        statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        rs = statement.executeQuery(String.format(findTableSizeQuery, tableName));
+        if (rs != null) {
+          while (rs.next()) {
+            tableSizeInBytes = rs.getLong(1);
+            if (tableSizeInBytes != null) {
+              tableSizeInMB = tableSizeInBytes / 1024.0 / 1024.0;
+            }
+          }
+        }
+
+        if (tableSizeInMB != null && tableSizeInMB > TABLE_SIZE_LIMIT_MB) {
+          warning("The database table {} is currently {} MB (limit is {}) and may impact performance. It is recommended " +
+                  "that you reduce its size by executing \"ambari-server db-cleanup\".",
+                  tableName, tableSizeInMB, TABLE_SIZE_LIMIT_MB);
+        } else if (tableSizeInMB != null) {
+          LOG.info(String.format("The database table %s is currently %.3f MB and is within normal limits (%.3f)",
+                  tableName, tableSizeInMB, TABLE_SIZE_LIMIT_MB));
+        } else {
+          throw new Exception("Unable to determine the size of table " + tableName + " from the schema information");
+        }
+      } catch (Exception e) {
+        LOG.error(String.format("Failed to get %s table size from database, will check row count: ", tableName), e);
+        try {
+          rs = statement.executeQuery(String.format(GET_ROW_COUNT_QUERY, tableName));
+          if (rs != null) {
+            while (rs.next()) {
+              tableRowCount = rs.getInt(1);
+            }
+          }
+
+          if (tableRowCount > TABLE_ROW_COUNT_LIMIT) {
+            warning("The database table {} currently has {} rows (limit is {}) and may impact performance. It is " +
+                    "recommended that you reduce its size by executing \"ambari-server db-cleanup\".",
+                    tableName, tableRowCount, TABLE_ROW_COUNT_LIMIT);
+          } else if (tableRowCount != -1) {
+            LOG.info(String.format("The database table %s currently has %d rows and is within normal limits (%d)", tableName, tableRowCount, TABLE_ROW_COUNT_LIMIT));
+          } else {
+            throw new SQLException("Unable to determine the row count of table " + tableName);
+          }
+        } catch (SQLException ex) {
+          LOG.error(String.format("Failed to get %s row count: ", tableName), ex);
+        }
+      } finally {
+        if (rs != null) {
+          try {
+            rs.close();
+          } catch (SQLException e) {
+            LOG.error("Exception occurred during result set closing procedure: ", e);
+          }
+        }
+
+        if (statement != null) {
+          try {
+            statement.close();
+          } catch (SQLException e) {
+            LOG.error("Exception occurred during statement closing procedure: ", e);
+          }
+        }
+      }
+    }
+
+  }
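
The size probe is dialect-specific; on Postgres, for instance, the whole check reduces to a single scalar query per table. A self-contained JDBC sketch of that one case (connection URL and credentials are hypothetical):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class TableSizeProbeSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:postgresql://localhost/ambari", "ambari", "secret"); // hypothetical
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery(
                 "SELECT pg_total_relation_size('host_role_command')")) {
          if (rs.next()) {
            // pg_total_relation_size returns bytes, including indexes and TOAST data
            double sizeMb = rs.getLong(1) / 1024.0 / 1024.0;
            System.out.printf("host_role_command: %.3f MB%n", sizeMb);
          }
        }
      }
    }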
+
+  /**
    * This method checks if any config type in clusterconfig table, has more than
    * one versions selected. If config version is selected(in selected column =
    * 1), it means that this version of config is actual. So, if any config type
@@ -338,14 +455,14 @@
     String SELECT_REQUEST_COUNT_QUERY = "select count(tpr.id) from topology_request tpr";
 
     String SELECT_JOINED_COUNT_QUERY = "select count(DISTINCT tpr.id) from topology_request tpr join " +
-      "topology_logical_request tlr on tpr.id = tlr.request_id join topology_host_request thr on tlr.id = " +
-      "thr.logical_request_id join topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task " +
-      "tlt on tht.id = tlt.host_task_id";
+      "topology_logical_request tlr on tpr.id = tlr.request_id";
 
-    int topologyRequestCount = 0;
-    int topologyRequestTablesJoinedCount = 0;
+    String SELECT_HOST_REQUEST_COUNT_QUERY = "select count(thr.id) from topology_host_request thr";
 
-    ResultSet rs = null;
+    String SELECT_HOST_JOINED_COUNT_QUERY = "select count(DISTINCT thr.id) from topology_host_request thr join " +
+            "topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task " +
+            "tlt on tht.id = tlt.host_task_id";
+
     Statement statement = null;
 
     if (connection == null) {
@@ -358,38 +475,25 @@
     try {
       statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
 
-      rs = statement.executeQuery(SELECT_REQUEST_COUNT_QUERY);
-      if (rs != null) {
-        while (rs.next()) {
-          topologyRequestCount = rs.getInt(1);
-        }
-      }
-
-      rs = statement.executeQuery(SELECT_JOINED_COUNT_QUERY);
-      if (rs != null) {
-        while (rs.next()) {
-          topologyRequestTablesJoinedCount = rs.getInt(1);
-        }
-      }
+      int topologyRequestCount = runQuery(statement, SELECT_REQUEST_COUNT_QUERY);
+      int topologyRequestTablesJoinedCount = runQuery(statement, SELECT_JOINED_COUNT_QUERY);
 
       if (topologyRequestCount != topologyRequestTablesJoinedCount) {
         error("Your topology request hierarchy is not complete for each row in topology_request should exist " +
-          "at least one raw in topology_logical_request, topology_host_request, topology_host_task, " +
-          "topology_logical_task.");
+          "at least one row in topology_logical_request");
       }
 
+      int topologyHostRequestCount = runQuery(statement, SELECT_HOST_REQUEST_COUNT_QUERY);
+      int topologyHostRequestTablesJoinedCount = runQuery(statement, SELECT_HOST_JOINED_COUNT_QUERY);
+
+      if (topologyHostRequestCount != topologyHostRequestTablesJoinedCount) {
+        error("Your topology request hierarchy is not complete for each row in topology_host_request should exist " +
+                "at least one row in topology_host_task, topology_logical_task.");
+      }
 
     } catch (SQLException e) {
       LOG.error("Exception occurred during topology request tables check: ", e);
     } finally {
-      if (rs != null) {
-        try {
-          rs.close();
-        } catch (SQLException e) {
-          LOG.error("Exception occurred during result set closing procedure: ", e);
-        }
-      }
-
       if (statement != null) {
         try {
           statement.close();
@@ -401,6 +505,31 @@
 
   }
 
+  private static int runQuery(Statement statement, String query) {
+    ResultSet rs = null;
+    int result = 0;
+    try {
+      rs = statement.executeQuery(query);
+
+      if (rs != null) {
+        while (rs.next()) {
+          result = rs.getInt(1);
+        }
+      }
+
+    } catch (SQLException e) {
+      LOG.error("Exception occurred during topology request tables check: ", e);
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          LOG.error("Exception occurred during result set closing procedure: ", e);
+        }
+      }
+    }
+    return result;
+  }
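
The extracted runQuery helper removes four copies of the executeQuery/close boilerplate. Since the codebase is on Java 7+, a try-with-resources variant could drop the explicit finally block entirely; a sketch of an equivalent helper under that assumption:

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ScalarQuerySketch {
      private static final Logger LOG = LoggerFactory.getLogger(ScalarQuerySketch.class);

      // Same contract as runQuery: first column of the last row, or 0 on error.
      static int runScalarQuery(Statement statement, String query) {
        try (ResultSet rs = statement.executeQuery(query)) { // rs is closed automatically
          int result = 0;
          while (rs.next()) {
            result = rs.getInt(1);
          }
          return result;
        } catch (SQLException e) {
          LOG.error("Exception occurred while executing query: {}", query, e);
          return 0;
        }
      }
    }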
 
 
   /**
@@ -501,6 +630,9 @@
     List<ClusterConfigEntity> notMappedClusterConfigs = getNotMappedClusterConfigsToService();
 
     for (ClusterConfigEntity clusterConfigEntity : notMappedClusterConfigs){
+      if (!clusterConfigEntity.isServiceDeleted()) {
+        continue; // skip cluster configs that were not left behind by a service deletion
+      }
       List<String> types = new ArrayList<>();
       String type = clusterConfigEntity.getType();
       types.add(type);
@@ -533,9 +665,11 @@
 
     Set<String> nonMappedConfigs = new HashSet<>();
     for (ClusterConfigEntity clusterConfigEntity : notMappedClasterConfigs) {
-      nonMappedConfigs.add(clusterConfigEntity.getType() + '-' + clusterConfigEntity.getTag());
+      if (!clusterConfigEntity.isServiceDeleted()) {
+        nonMappedConfigs.add(clusterConfigEntity.getType() + '-' + clusterConfigEntity.getTag());
+      }
     }
-    if (!notMappedClasterConfigs.isEmpty()){
+    if (!nonMappedConfigs.isEmpty()){
       warning("You have config(s): {} that is(are) not mapped (in serviceconfigmapping table) to any service!", StringUtils.join(nonMappedConfigs, ","));
     }
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
index 02f6559..47a0ea0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
@@ -24,7 +24,6 @@
 import org.apache.ambari.server.controller.PrereqCheckRequest;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
index 05f3e91..273bdaa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesUpCheck.java
@@ -195,8 +195,6 @@
    * purposes of this check. Component type, maintenance mode, and state are
    * taken into account.
    *
-   * @param clusters
-   *          the clusters instance
    * @param cluster
    *          the cluster
    * @param serviceComponent
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckGroup.java
index 67cf4f1..28ec1b1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/UpgradeCheckGroup.java
@@ -99,7 +99,7 @@
    *
    * @param order
    */
-  private UpgradeCheckGroup(Float order) {
+  UpgradeCheckGroup(Float order) {
     m_order = order;
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index a8bfbf0..114046f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -364,6 +364,13 @@
       "views.validate", "false");
 
   /**
+   * Determines whether the view directory watcher service should be disabled.
+   */
+  @Markdown(description = "Determines whether the view directory watcher service should be disabled.")
+  public static final ConfigurationProperty<String> DISABLE_VIEW_DIRECTORY_WATCHER = new ConfigurationProperty<>(
+      "views.directory.watcher.disable", "false");
+
+  /**
   * Determines whether to remove undeployed views from the Ambari database.
   */
  @Markdown(description = "Determines whether to remove undeployed views from the Ambari database.")
@@ -570,7 +577,7 @@
       "security.server.key_name", "ca.key");
 
   /**
-   * The name of the keystore file, located in {@link SRVR_KSTR_DIR}.
+   * The name of the keystore file, located in {@link #SRVR_KSTR_DIR}.
    */
   @Markdown(description = "The name of the keystore file, located in `security.server.keys_dir`")
   public static final ConfigurationProperty<String> KSTR_NAME = new ConfigurationProperty<>(
@@ -588,7 +595,7 @@
 
   /**
    * The name of the truststore file ambari uses to store trusted certificates.
-   * Located in {@link SRVR_KSTR_DIR}.
+   * Located in {@link #SRVR_KSTR_DIR}.
    */
   @Markdown(description = "The name of the truststore file ambari uses to store trusted certificates. Located in `security.server.keys_dir`")
   public static final ConfigurationProperty<String> TSTR_NAME = new ConfigurationProperty<>(
@@ -1652,7 +1659,7 @@
       "ssl.trustStore.password", null);
 
   /**
-   * The type of truststore used by the {@link JAVAX_SSL_TRUSTSTORE_TYPE} property.
+   * The type of truststore used by the {@link #JAVAX_SSL_TRUSTSTORE_TYPE} property.
    */
   @Markdown(description = "The type of truststore used by the `javax.net.ssl.trustStoreType` property.")
   public static final ConfigurationProperty<String> SSL_TRUSTSTORE_TYPE = new ConfigurationProperty<>(
@@ -2770,7 +2777,7 @@
      * Constructor.
      *
      */
-    private DatabaseType(String databaseType) {
+    DatabaseType(String databaseType) {
       m_databaseType = databaseType;
     }
 
@@ -2805,7 +2812,7 @@
      *
      * @param name
      */
-    private ConnectionPoolType(String name) {
+    ConnectionPoolType(String name) {
       m_name = name;
     }
 
@@ -3296,6 +3303,15 @@
   }
 
   /**
+   * Determines whether the view directory watcher service should be disabled.
+   *
+   * @return true if the view directory watcher service should be disabled
+   */
+  public boolean isViewDirectoryWatcherServiceDisabled() {
+    return Boolean.parseBoolean(getProperty(DISABLE_VIEW_DIRECTORY_WATCHER));
+  }
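
This accessor (like isMetricsServiceDisabled further down in this file) follows the file's convention: a string-valued property defaulting to "false", surfaced through a boolean getter. A self-contained sketch of that pattern (class and lookup mechanics hypothetical, not Ambari's ConfigurationProperty machinery):

    import java.util.Properties;

    public class FlagConfigSketch {
      private final Properties properties = new Properties();

      public boolean isFeatureDisabled(String key) {
        // A missing property falls back to "false", i.e. the feature stays enabled.
        return Boolean.parseBoolean(properties.getProperty(key, "false"));
      }

      public static void main(String[] args) {
        FlagConfigSketch config = new FlagConfigSketch();
        config.properties.setProperty("views.directory.watcher.disable", "true");
        System.out.println(config.isFeatureDisabled("views.directory.watcher.disable")); // true
        System.out.println(config.isFeatureDisabled("ambariserver.metrics.disable"));    // false
      }
    }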
+
+  /**
    * @return conventional Java version number, e.g. 7.
    * Integer is used here to simplify comparisons during usage.
    * If java version is not supported, returns -1
@@ -4411,7 +4427,7 @@
    * Caching of host role command status summary can be enabled/disabled
    * through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED} config property.
    * This method returns the value of {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED}
-   * config property. If this config property is not defined than returns the default defined by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_ENABLED_DEFAULT}.
+   * config property.
    * @return true if caching is to be enabled otherwise false.
    */
   public boolean getHostRoleCommandStatusSummaryCacheEnabled() {
@@ -4433,8 +4449,7 @@
    * In order to avoid the cache storing host role command status summary objects exhaust
    * memory we set a max record number allowed for the cache. This limit can be configured
    * through {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE} config property. The method returns
-   * the value of this config property. If this config property is not defined than
-   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_SIZE_DEFAULT} is returned.
+   * the value of this config property.
    * @return the upper limit for the number of cached host role command summaries.
    */
   public long getHostRoleCommandStatusSummaryCacheSize() {
@@ -4455,8 +4470,7 @@
   /**
    * As a safety measure the cache storing host role command status summaries should auto expire after a while.
    * The expiry duration is specified through the {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION} config property
-   * expressed in minutes. The method returns the value of this config property. If this config property is not defined than
-   * the default value specified by {@link #SERVER_HRC_STATUS_SUMMARY_CACHE_EXPIRY_DURATION_DEFAULT}
+   * expressed in minutes. The method returns the value of this config property.
    * @return the cache expiry duration in minutes
    */
   public long getHostRoleCommandStatusSummaryCacheExpiryDuration() {
@@ -4757,7 +4771,7 @@
 
   /**
    * Get property-providers' timeout value in milliseconds for waiting on the
-   * completion of submitted {@link Callable}s. This will return {@value 5000}
+   * completion of submitted {@link Callable}s. This will return 5000
    * if not specified.
    *
   * @return the property-providers' completion service timeout, in millis.
@@ -5007,8 +5021,6 @@
   /**
    * Gets the minimum number of connections that should always exist in the
    * connection pool.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MIN_SIZE}
    */
   public int getConnectionPoolMinimumSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MIN_SIZE));
@@ -5017,8 +5029,6 @@
   /**
    * Gets the maximum number of connections that should even exist in the
    * connection pool.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_SIZE}
    */
   public int getConnectionPoolMaximumSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_SIZE));
@@ -5028,8 +5038,6 @@
    * Gets the maximum amount of time in seconds any connection, whether its been
    * idle or active, should even be in the pool. This will terminate the
    * connection after the expiration age and force new connections to be opened.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_AGE}
    */
   public int getConnectionPoolMaximumAge() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_AGE));
@@ -5039,8 +5047,6 @@
    * Gets the maximum amount of time in seconds that an idle connection can
    * remain in the pool. This should always be greater than the value returned
    * from {@link #getConnectionPoolMaximumExcessIdle()}
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME}
    */
   public int getConnectionPoolMaximumIdle() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME));
@@ -5050,9 +5056,6 @@
    * Gets the maximum amount of time in seconds that connections beyond the
    * minimum pool size should remain in the pool. This should always be less
    * than than the value returned from {@link #getConnectionPoolMaximumIdle()}
-   *
-   * @return default of
-   *         {@value #SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS}
    */
   public int getConnectionPoolMaximumExcessIdle() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS));
@@ -5062,8 +5065,6 @@
    * Gets the number of connections that should be retrieved when the pool size
    * must increase. It's wise to set this higher than 1 since the assumption is
    * that a pool that needs to grow should probably grow by more than 1.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE}
    */
   public int getConnectionPoolAcquisitionSize() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE));
@@ -5072,9 +5073,6 @@
   /**
    * Gets the number of times connections should be retried to be acquired from
    * the database before giving up.
-   *
-   * @return default of
-   *         {@value #SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS}
    */
   public int getConnectionPoolAcquisitionRetryAttempts() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS));
@@ -5082,8 +5080,6 @@
 
   /**
    * Gets the delay in milliseconds between connection acquire attempts.
-   *
-   * @return default of {@value #DEFAULT_JDBC_POOL_ACQUISITION_RETRY_DELAY}
    */
   public int getConnectionPoolAcquisitionRetryDelay() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_DELAY));
@@ -5093,8 +5089,6 @@
   /**
    * Gets the number of seconds in between testing each idle connection in the
    * connection pool for validity.
-   *
-   * @return default of {@value #SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL}
    */
   public int getConnectionPoolIdleTestInternval() {
     return Integer.parseInt(getProperty(SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL));
@@ -5164,6 +5158,11 @@
     return Boolean.parseBoolean(getProperty(TIMELINE_METRICS_CACHE_DISABLE));
   }
 
+  /** @see #AMBARISERVER_METRICS_DISABLE */
+  public boolean isMetricsServiceDisabled() {
+    return Boolean.parseBoolean(getProperty(AMBARISERVER_METRICS_DISABLE));
+  }
+
   /**
    * Constant fudge factor subtracted from the cache update requests to
    * account for unavailability of data on the trailing edge due to buffering.
@@ -5279,9 +5278,6 @@
   /**
    * Gets the interval at which cached alert data is written out to the
    * database, if enabled.
-   *
-   * @return the cache flush interval, or
-   *         {@value #ALERTS_CACHE_FLUSH_INTERVAL_DEFAULT} if not set.
    */
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public int getAlertCacheFlushInterval() {
@@ -5290,9 +5286,6 @@
 
   /**
    * Gets the size of the alerts cache, if enabled.
-   *
-   * @return the cache flush interval, or {@value #ALERTS_CACHE_SIZE_DEFAULT} if
-   *         not set.
    */
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public int getAlertCacheSize() {
@@ -5435,10 +5428,6 @@
 
   /**
    * Gets the core pool size used for the {@link MetricsRetrievalService}.
-   *
-   * @return the core pool size or
-   *         {@value #PROCESSOR_BASED_THREADPOOL_MAX_SIZE_DEFAULT} if not
-   *         specified.
    */
   public int getMetricsServiceThreadPoolCoreSize() {
     return Integer.parseInt(getProperty(METRIC_RETRIEVAL_SERVICE_THREADPOOL_CORE_SIZE));
@@ -5447,11 +5436,7 @@
   /**
    * Gets the max pool size used for the {@link MetricsRetrievalService}.
   * Threads will only be increased up to this value if the worker queue is
-   * exhauseted and rejects the new task.
-   *
-   * @return the max pool size, or
-   *         {@value PROCESSOR_BASED_THREADPOOL_MAX_SIZE_DEFAULT} if not
-   *         specified.
+   * exhausted and rejects the new task.
    * @see #getMetricsServiceWorkerQueueSize()
    */
   public int getMetricsServiceThreadPoolMaxSize() {
@@ -5914,7 +5899,7 @@
    */
   @Retention(RetentionPolicy.RUNTIME)
   @Target({ ElementType.TYPE, ElementType.FIELD, ElementType.METHOD })
-  static @interface ConfigurationMarkdown {
+  @interface ConfigurationMarkdown {
     /**
      * The base Markdown.
      *
@@ -5944,7 +5929,7 @@
    */
   @Retention(RetentionPolicy.RUNTIME)
   @Target({ ElementType.TYPE, ElementType.FIELD, ElementType.METHOD })
-  private static @interface ClusterScale {
+  private @interface ClusterScale {
     ClusterSizeType clusterSize();
     String value();
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index d5018f5..520dcab 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -75,10 +75,12 @@
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
 import org.apache.ambari.server.orm.entities.RepositoryEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.RequestEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.CommandScriptDefinition;
@@ -176,6 +178,9 @@
   private ClusterVersionDAO clusterVersionDAO;
 
   @Inject
+  private RequestDAO requestDAO;
+
+  @Inject
   private HostRoleCommandDAO hostRoleCommandDAO;
 
   private Map<String, Map<String, Map<String, String>>> configCredentialsForService = new HashMap<>();
@@ -374,6 +379,9 @@
       if (cmd != null) {
         cmd.setCommandDetail(commandDetail);
         cmd.setCustomCommandName(commandName);
+        if (customCommandDefinition != null) {
+          cmd.setOpsDisplayName(customCommandDefinition.getOpsDisplayName());
+        }
       }
 
       //set type background
@@ -441,7 +449,7 @@
       }
 
       boolean isInstallCommand = commandName.equals(RoleCommand.INSTALL.toString());
-      String commandTimeout = configs.getDefaultAgentTaskTimeout(isInstallCommand);
+      int commandTimeout = Integer.parseInt(configs.getDefaultAgentTaskTimeout(isInstallCommand));
 
       ComponentInfo componentInfo = ambariMetaInfo.getComponent(
           stackId.getStackName(), stackId.getStackVersion(),
@@ -455,7 +463,7 @@
           commandParams.put(SCRIPT, script.getScript());
           commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
           if (script.getTimeout() > 0) {
-            commandTimeout = String.valueOf(script.getTimeout());
+            commandTimeout = script.getTimeout();
           }
         } else {
           String message = String.format("Component %s has not command script " +
@@ -466,7 +474,13 @@
         // We don't need package/repo information to perform service check
       }
 
-      commandParams.put(COMMAND_TIMEOUT, commandTimeout);
+      // !!! the action execution context timeout is the final say, but make sure it's at least 60 seconds
+      if (null != actionExecutionContext.getTimeout()) {
+        commandTimeout = Math.max(60, actionExecutionContext.getTimeout().intValue());
+      }
+
+      commandParams.put(COMMAND_TIMEOUT, String.valueOf(commandTimeout));
       commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
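
The timeout resolution now layers three sources: the agent default, an optional script-level override, and finally the action execution context, which wins but is clamped to a 60-second floor. A small sketch of that precedence (method and parameter names hypothetical):

    public class TimeoutResolutionSketch {
      static int resolveTimeout(int agentDefault, Integer scriptTimeout, Integer contextTimeout) {
        int timeout = agentDefault;
        if (scriptTimeout != null && scriptTimeout > 0) {
          timeout = scriptTimeout; // script override, when present
        }
        if (contextTimeout != null) {
          timeout = Math.max(60, contextTimeout); // context has the final say, floored at 60s
        }
        return timeout;
      }

      public static void main(String[] args) {
        System.out.println(resolveTimeout(900, null, 10));   // 60  (floor applied)
        System.out.println(resolveTimeout(900, 300, null));  // 300 (script override)
        System.out.println(resolveTimeout(900, null, null)); // 900 (agent default)
      }
    }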
 
@@ -1005,7 +1019,12 @@
           StageUtils.getClusterHostInfo(cluster));
 
       // Reset cluster host info as it has changed
-      stage.setClusterHostInfo(clusterHostInfoJson);
+      RequestEntity requestEntity = requestDAO.findByPK(stage.getRequestId());
+
+      if (requestEntity != null) {
+        requestEntity.setClusterHostInfo(clusterHostInfoJson);
+        requestDAO.merge(requestEntity);
+      }
 
       Map<String, String> commandParams = new HashMap<>();
       if (serviceName.equals(Service.Type.HBASE.name())) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index 91bfe09..96bab85 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -365,28 +365,28 @@
    *
    * @throws AmbariException if we fail to link the extension to the stack
    */
-  public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+  void createExtensionLink(ExtensionLinkRequest request) throws AmbariException;
 
   /**
    * Update a link between an extension and a stack
    *
    * @throws AmbariException if we fail to link the extension to the stack
    */
-  public void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+  void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException;
 
   /**
    * Update a link between an extension and a stack
    *
    * @throws AmbariException if we fail to link the extension to the stack
    */
-  public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException;
+  void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException;
 
   /**
    * Delete a link between an extension and a stack
    *
    * @throws AmbariException if we fail to unlink the extension from the stack
    */
-  public void deleteExtensionLink(ExtensionLinkRequest request) throws AmbariException;
+  void deleteExtensionLink(ExtensionLinkRequest request) throws AmbariException;
 
   /**
    * Get supported extensions.
@@ -395,7 +395,7 @@
    * @return a set of extensions responses
    * @throws  AmbariException if the resources cannot be read
    */
-  public Set<ExtensionResponse> getExtensions(Set<ExtensionRequest> requests) throws AmbariException;
+  Set<ExtensionResponse> getExtensions(Set<ExtensionRequest> requests) throws AmbariException;
 
   /**
    * Get supported extension versions.
@@ -404,7 +404,7 @@
    * @return a set of extension versions responses
    * @throws  AmbariException if the resources cannot be read
    */
-  public Set<ExtensionVersionResponse> getExtensionVersions(Set<ExtensionVersionRequest> requests) throws AmbariException;
+  Set<ExtensionVersionResponse> getExtensionVersions(Set<ExtensionVersionRequest> requests) throws AmbariException;
 
   /**
    * Get supported stacks versions.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 27bbd6c..9bc7f4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -120,14 +120,12 @@
 import org.apache.ambari.server.orm.dao.WidgetLayoutDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
-import org.apache.ambari.server.orm.entities.ExtensionEntity;
 import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
 import org.apache.ambari.server.orm.entities.RepositoryEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.SettingEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.WidgetEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutUserWidgetEntity;
@@ -309,11 +307,13 @@
 
   private MaintenanceStateHelper maintenanceStateHelper;
 
-  @Inject
-  private ExtensionLinkDAO linkDAO;
+  private AmbariManagementHelper helper;
+
   @Inject
   private ExtensionDAO extensionDAO;
   @Inject
+  private ExtensionLinkDAO linkDAO;
+  @Inject
   private StackDAO stackDAO;
 
   /**
@@ -392,6 +392,7 @@
       mysqljdbcUrl = null;
       serverDB = null;
     }
+    helper = new AmbariManagementHelper(stackDAO, extensionDAO, linkDAO);
   }
 
   @Override
@@ -1041,14 +1042,14 @@
   }
 
   private Stage createNewStage(long id, Cluster cluster, long requestId,
-                               String requestContext, String clusterHostInfo,
+                               String requestContext,
                                String commandParamsStage, String hostParamsStage) {
     String logDir = BASE_LOG_DIR + File.pathSeparator + requestId;
     Stage stage =
         stageFactory.createNew(requestId, logDir,
           null == cluster ? null : cluster.getClusterName(),
           null == cluster ? -1L : cluster.getClusterId(),
-          requestContext, clusterHostInfo, commandParamsStage,
+          requestContext, commandParamsStage,
           hostParamsStage);
     stage.setStageId(id);
     return stage;
@@ -2630,8 +2631,7 @@
           customCommandExecutionHelper.createDefaultHostParams(cluster));
 
       Stage stage = createNewStage(requestStages.getLastStageId(), cluster,
-          requestStages.getId(), requestProperties.get(REQUEST_CONTEXT_PROPERTY),
-          clusterHostInfoJson, "{}", hostParamsJson);
+          requestStages.getId(), requestProperties.get(REQUEST_CONTEXT_PROPERTY), "{}", hostParamsJson);
       boolean skipFailure = false;
       if (requestProperties.containsKey(Setting.SETTING_NAME_SKIP_FAILURE) && requestProperties.get(Setting.SETTING_NAME_SKIP_FAILURE).equalsIgnoreCase("true")) {
         skipFailure = true;
@@ -2981,6 +2981,7 @@
         rg.setCommandExecutionType(CommandExecutionType.DEPENDENCY_ORDERED);
       }
       rg.build(stage);
+      requestStages.setClusterHostInfo(clusterHostInfoJson);
       requestStages.addStages(rg.getStages());
 
       if (!componentsToEnableKerberos.isEmpty()) {
@@ -3066,9 +3067,7 @@
     Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(cluster);
     String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
     Map<String, String> hostParamsCmd = customCommandExecutionHelper.createDefaultHostParams(cluster);
-    Stage stage = createNewStage(0, cluster,
-                                 1, "",
-                                 clusterHostInfoJson, "{}", "");
+    Stage stage = createNewStage(0, cluster, 1, "", "{}", "");
 
 
     Map<String, Map<String, String>> configTags = configHelper.getEffectiveDesiredTags(cluster, scHost.getHostName());
@@ -4036,7 +4035,7 @@
     commandParamsForStage = gson.toJson(commandParamsStage);
 
     Stage stage = createNewStage(requestStageContainer.getLastStageId(), cluster, requestId, requestContext,
-        jsons.getClusterHostInfo(), commandParamsForStage, jsons.getHostParamsForStage());
+        commandParamsForStage, jsons.getHostParamsForStage());
 
     if (actionRequest.isCommand()) {
       customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage,
@@ -4057,6 +4056,7 @@
     List<Stage> stages = rg.getStages();
 
     if (stages != null && !stages.isEmpty()) {
+      requestStageContainer.setClusterHostInfo(jsons.getClusterHostInfo());
       requestStageContainer.addStages(stages);
     }
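
Both call sites in this file now attach the cluster host info JSON to the request container instead of every stage, so the potentially large blob is stored once per request. A sketch of the shape of that refactoring, with hypothetical container classes:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class RequestContainerSketch {
      static class Stage {
        final String name; // no per-stage clusterHostInfo field anymore
        Stage(String name) { this.name = name; }
      }

      static class RequestStageContainer {
        private final List<Stage> stages = new ArrayList<>();
        private String clusterHostInfo; // stored once, at request scope

        void setClusterHostInfo(String json) { this.clusterHostInfo = json; }
        void addStages(List<Stage> newStages) { stages.addAll(newStages); }
        String getClusterHostInfo() { return clusterHostInfo; }
      }

      public static void main(String[] args) {
        RequestStageContainer request = new RequestStageContainer();
        request.setClusterHostInfo("{\"all_hosts\":[\"c6401\",\"c6402\"]}"); // hypothetical JSON
        request.addStages(Arrays.asList(new Stage("INSTALL"), new Stage("START")));
        System.out.println(request.getClusterHostInfo());
      }
    }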
 
@@ -5448,7 +5448,13 @@
    */
   @Override
   public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException {
-    validateCreateExtensionLinkRequest(request);
+    if (StringUtils.isBlank(request.getStackName())
+            || StringUtils.isBlank(request.getStackVersion())
+            || StringUtils.isBlank(request.getExtensionName())
+            || StringUtils.isBlank(request.getExtensionVersion())) {
+
+      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+    }
 
     StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
 
@@ -5462,24 +5468,7 @@
       throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
     }
 
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
-    ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
-    ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
-
-    try {
-      linkDAO.create(linkEntity);
-      linkEntity = linkDAO.merge(linkEntity);
-    } catch (RollbackException e) {
-      String message = "Unable to create extension link";
-      LOG.debug(message, e);
-      String errorMessage = message
-              + ", stackName=" + request.getStackName()
-              + ", stackVersion=" + request.getStackVersion()
-              + ", extensionName=" + request.getExtensionName()
-              + ", extensionVersion=" + request.getExtensionVersion();
-      LOG.warn(errorMessage);
-      throw new AmbariException(errorMessage, e);
-    }
+    helper.createExtensionLink(ambariMetaInfo.getStackManager(), stackInfo, extensionInfo);
   }
 
   /**
@@ -5530,37 +5519,6 @@
     ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
   }
 
-  private void validateCreateExtensionLinkRequest(ExtensionLinkRequest request) throws AmbariException {
-    if (request.getStackName() == null
-            || request.getStackVersion() == null
-            || request.getExtensionName() == null
-            || request.getExtensionVersion() == null) {
-
-      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
-    }
-
-    ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(request.getStackName(), request.getStackVersion(),
-            request.getExtensionName(), request.getExtensionVersion());
-
-    if (entity != null) {
-      throw new AmbariException("The stack and extension are already linked"
-                + ", stackName=" + request.getStackName()
-                + ", stackVersion=" + request.getStackVersion()
-                + ", extensionName=" + request.getExtensionName()
-                + ", extensionVersion=" + request.getExtensionVersion());
-    }
-  }
-
-  private ExtensionLinkEntity createExtensionLinkEntity(ExtensionLinkRequest request) throws AmbariException {
-    StackEntity stack = stackDAO.find(request.getStackName(), request.getStackVersion());
-    ExtensionEntity extension = extensionDAO.find(request.getExtensionName(), request.getExtensionVersion());
-
-    ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
-    linkEntity.setStack(stack);
-    linkEntity.setExtension(extension);
-    return linkEntity;
-  }
-
   @Override
   public QuickLinkVisibilityController getQuicklinkVisibilityController() {
     SettingEntity entity = settingDAO.findByName(QuickLinksProfile.SETTING_NAME_QUICKLINKS_PROFILE);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
new file mode 100644
index 0000000..2dd6f12
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.RollbackException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.stack.ExtensionHelper;
+import org.apache.ambari.server.stack.StackManager;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+public class AmbariManagementHelper {
+
+  private final static Logger LOG =
+      LoggerFactory.getLogger(AmbariManagementHelper.class);
+
+  private ExtensionLinkDAO linkDAO;
+  private ExtensionDAO extensionDAO;
+  private StackDAO stackDAO;
+
+  @Inject
+  public AmbariManagementHelper(StackDAO stackDAO, ExtensionDAO extensionDAO, ExtensionLinkDAO linkDAO) {
+    this.stackDAO = stackDAO;
+    this.extensionDAO = extensionDAO;
+    this.linkDAO = linkDAO;
+  }
+
+  /**
+   * This method will create a link between an extension version and a stack version (Extension Link).
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
+    stackManager.linkStackToExtension(stackInfo, extensionInfo);
+
+    try {
+      linkDAO.create(linkEntity);
+      linkEntity = linkDAO.merge(linkEntity);
+    } catch (RollbackException e) {
+      String message = "Unable to create extension link";
+      LOG.debug(message, e);
+      String errorMessage = message
+              + ", stackName=" + stackInfo.getName()
+              + ", stackVersion=" + stackInfo.getVersion()
+              + ", extensionName=" + extensionInfo.getName()
+              + ", extensionVersion=" + extensionInfo.getVersion();
+      LOG.warn(errorMessage);
+      throw new AmbariException(errorMessage, e);
+    }
+  }
+
+  /**
+   * This method will create extension links between a list of extensions and all eligible stack versions.
+   *
+   * An extension version is like a stack version but it contains custom services. Each extension that is
+   * active and marked for autolink is linked to every matching stack version that is not already linked
+   * to a version of that extension and that meets the extension's minimum supported stack version.
+   */
+  public void createExtensionLinks(StackManager stackManager, List<ExtensionInfo> extensions) throws AmbariException {
+    Map<String, List<StackInfo>> stackMap = stackManager.getStacksByName();
+    for (List<StackInfo> stacks : stackMap.values()) {
+      Collections.sort(stacks);
+      Collections.reverse(stacks);
+    }
+
+    Collections.sort(extensions);
+    Collections.reverse(extensions);
+    for (ExtensionInfo extension : extensions) {
+      if (extension.isActive() && extension.isAutoLink()) {
+        LOG.debug("Autolink - looking for matching stack versions for extension:{}/{} ", extension.getName(), extension.getVersion());
+        for (ExtensionMetainfoXml.Stack supportedStack : extension.getStacks()) {
+          List<StackInfo> stacks = stackMap.get(supportedStack.getName());
+          for (StackInfo stack : stacks) {
+            // If the stack version is not currently linked to a version of the extension and it meets the minimum stack version then link them
+            if (stack.getExtension(extension.getName()) == null && VersionUtils.compareVersions(stack.getVersion(), supportedStack.getVersion()) > -1) {
+              LOG.debug("Autolink - extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+                       stack.getName(), stack.getVersion());
+              createExtensionLink(stackManager, stack, extension);
+            }
+            else {
+              LOG.debug("Autolink - not a match extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+                       stack.getName(), stack.getVersion());
+            }
+          }
+        }
+      }
+      else {
+        LOG.debug("Autolink - skipping extension: {}/{}.  It is either not active or set to autolink.", extension.getName(), extension.getVersion());
+      }
+    }
+  }
+
+  /**
+   * Validates the stackInfo and extensionInfo parameters are valid.
+   * If they are then it confirms that the stack and extension are not already linked.
+   */
+  private void validateCreateExtensionLinkRequest(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    if (stackInfo == null) {
+      throw new IllegalArgumentException("Stack should be provided");
+    }
+    if (extensionInfo == null) {
+      throw new IllegalArgumentException("Extension should be provided");
+    }
+    if (StringUtils.isBlank(stackInfo.getName())
+            || StringUtils.isBlank(stackInfo.getVersion())
+            || StringUtils.isBlank(extensionInfo.getName())
+            || StringUtils.isBlank(extensionInfo.getVersion())) {
+
+      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+    }
+
+    ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(stackInfo.getName(), stackInfo.getVersion(),
+        extensionInfo.getName(), extensionInfo.getVersion());
+
+    if (entity != null) {
+      throw new AmbariException("The stack and extension are already linked"
+                + ", stackName=" + stackInfo.getName()
+                + ", stackVersion=" + stackInfo.getVersion()
+                + ", extensionName=" + extensionInfo.getName()
+                + ", extensionVersion=" + extensionInfo.getVersion());
+    }
+  }
+
+  private ExtensionLinkEntity createExtensionLinkEntity(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    StackEntity stack = stackDAO.find(stackInfo.getName(), stackInfo.getVersion());
+    ExtensionEntity extension = extensionDAO.find(extensionInfo.getName(), extensionInfo.getVersion());
+
+    ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
+    linkEntity.setStack(stack);
+    linkEntity.setExtension(extension);
+    return linkEntity;
+  }
+
+}
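The autolink pass above sorts both the stacks and the extensions into descending version order, so the newest extension versions are considered first; because a stack is skipped once it already carries a link for a given extension name, each stack ends up linked to the newest compatible extension version, while every qualifying stack version (not just the newest stack) receives a link. A minimal, self-contained sketch of that matching rule, using illustrative stand-in types rather than the real StackInfo/ExtensionInfo classes:

import java.util.*;

// Standalone sketch of the autolink matching rule from createExtensionLinks.
// Stack and Extension here are hypothetical stand-ins, not Ambari types.
public class AutolinkSketch {
  record Stack(String name, String version, Map<String, String> links) {}
  record Extension(String name, String version, String minStackVersion) {}

  // Tiny dotted-numeric version comparator, standing in for VersionUtils.
  static int compareVersions(String a, String b) {
    String[] xs = a.split("\\."), ys = b.split("\\.");
    for (int i = 0; i < Math.max(xs.length, ys.length); i++) {
      int x = i < xs.length ? Integer.parseInt(xs[i]) : 0;
      int y = i < ys.length ? Integer.parseInt(ys[i]) : 0;
      if (x != y) return Integer.compare(x, y);
    }
    return 0;
  }

  public static void main(String[] args) {
    List<Stack> stacks = List.of(
        new Stack("HDP", "2.5", new HashMap<>()),
        new Stack("HDP", "2.4", new HashMap<>()));
    List<Extension> extensions = new ArrayList<>(List.of(
        new Extension("EXT", "1.0", "2.4"),
        new Extension("EXT", "1.1", "2.4")));

    // Newest first, mirroring the sort/reverse in createExtensionLinks.
    extensions.sort((l, r) -> compareVersions(r.version(), l.version()));

    for (Extension ext : extensions) {
      for (Stack stack : stacks) {
        // Link only if no link exists yet and the stack meets the minimum version.
        if (!stack.links().containsKey(ext.name())
            && compareVersions(stack.version(), ext.minStackVersion()) > -1) {
          stack.links().put(ext.name(), ext.version());
        }
      }
    }
    // Prints EXT=1.1 for both HDP-2.5 and HDP-2.4: the newest compatible match.
    stacks.forEach(s -> System.out.println(s.name() + "-" + s.version() + " -> " + s.links()));
  }
}

Note that, as in the real method, the inner loop does not break after the first match, so every stack version meeting the minimum gets a link, not only the newest stack.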
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 4e7af0ce..2f799b7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -310,6 +310,7 @@
     initDB();
     server = new Server();
     server.setSessionIdManager(sessionIdManager);
+    server.setSendServerVersion(false);
     Server serverForAgent = new Server();
 
     setSystemProperties(configs);
@@ -454,7 +455,6 @@
       SecurityContextHolder.setStrategyName(SecurityContextHolder.MODE_INHERITABLETHREADLOCAL);
 
       viewRegistry.readViewArchives();
-      viewDirectoryWatcher.start();
 
       //Check and load requestlog handler.
       loadRequestlogHandler(handlerList, serverForAgent, configsMap);
@@ -562,6 +562,11 @@
       serverForAgent.start();
       LOG.info("********* Started Server **********");
 
+      if (!configs.isViewDirectoryWatcherServiceDisabled()) {
+        LOG.info("Starting View Directory Watcher");
+        viewDirectoryWatcher.start();
+      }
+
       manager.start();
       LOG.info("********* Started ActionManager **********");
 
@@ -571,7 +576,7 @@
       serviceManager.startAsync();
       LOG.info("********* Started Services **********");
 
-      if (!Configuration.AMBARISERVER_METRICS_DISABLE.equals(true)) {
+      if (!configs.isMetricsServiceDisabled()) {
         metricsService.start();
       } else {
         LOG.info("AmbariServer Metrics disabled.");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index d000571..ca2dda5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -680,6 +680,6 @@
      * @return the response
      * @throws AmbariException thrown if a problem occurred during invocation
      */
-    public T invoke(A arg) throws AmbariException;
+    T invoke(A arg) throws AmbariException;
   }
 }
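The invoke cleanup here, like the HostPredicate change in MaintenanceStateHelper further down, simply drops modifiers that are redundant in Java: interface members are implicitly public (and methods implicitly abstract), and a member interface is implicitly static. Both declarations below compile to exactly the same thing:

// Interface members are implicitly public; a nested interface is implicitly static.
public class ModifierDemo {
  public static interface Verbose {          // redundant modifiers
    public abstract boolean check(String s);
  }

  interface Concise {                        // identical semantics, no noise
    boolean check(String s);
  }

  public static void main(String[] args) {
    Verbose v = s -> !s.isEmpty();
    Concise c = s -> !s.isEmpty();
    System.out.println(v.check("a") && c.check("b")); // true
  }
}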
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index 6687942..5c4728a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -2186,14 +2186,13 @@
    * @return a newly created Stage
    */
   private Stage createNewStage(long id, Cluster cluster, long requestId,
-                               String requestContext, String clusterHostInfo,
-                               String commandParams, String hostParams) {
+                               String requestContext, String commandParams, String hostParams) {
+
     Stage stage = stageFactory.createNew(requestId,
         BASE_LOG_DIR + File.pathSeparator + requestId,
         cluster.getClusterName(),
         cluster.getClusterId(),
         requestContext,
-        clusterHostInfo,
         commandParams,
         hostParams);
 
@@ -2221,14 +2220,14 @@
    * @param timeout           the timeout for the task/action  @return a newly created Stage
    */
   private Stage createServerActionStage(long id, Cluster cluster, long requestId,
-                                        String requestContext, String clusterHostInfo,
+                                        String requestContext,
                                         String commandParams, String hostParams,
                                         Class<? extends ServerAction> actionClass,
                                         ServiceComponentHostServerActionEvent event,
                                         Map<String, String> commandParameters, String commandDetail,
                                         Integer timeout) throws AmbariException {
 
-    Stage stage = createNewStage(id, cluster, requestId, requestContext, clusterHostInfo, commandParams, hostParams);
+    Stage stage = createNewStage(id, cluster, requestId, requestContext, commandParams, hostParams);
     stage.addServerActionCommand(actionClass.getName(), null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.EXECUTE, cluster.getClusterName(), event, commandParameters, commandDetail,
         ambariManagementController.findConfigurationTagsWithOverrides(cluster, null), timeout,
@@ -2769,7 +2768,6 @@
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareEnableKerberosServerAction.class,
@@ -2780,6 +2778,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2792,7 +2792,6 @@
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareKerberosIdentitiesServerAction.class,
@@ -2803,6 +2802,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2815,7 +2816,6 @@
           cluster,
           requestStageContainer.getId(),
           "Preparing Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           PrepareDisableKerberosServerAction.class,
@@ -2826,6 +2826,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2838,7 +2840,6 @@
           cluster,
           requestStageContainer.getId(),
           "Create Principals",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CreatePrincipalsServerAction.class,
@@ -2849,6 +2850,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2861,7 +2864,6 @@
           cluster,
           requestStageContainer.getId(),
           "Destroy Principals",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           DestroyPrincipalsServerAction.class,
@@ -2872,6 +2874,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2884,7 +2888,6 @@
           cluster,
           requestStageContainer.getId(),
           "Configure Ambari Identity",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           ConfigureAmbariIdentitiesServerAction.class,
@@ -2895,6 +2898,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2907,7 +2912,6 @@
           cluster,
           requestStageContainer.getId(),
           "Create Keytabs",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CreateKeytabFilesServerAction.class,
@@ -2918,6 +2922,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2933,7 +2939,6 @@
           cluster,
           requestStageContainer.getId(),
           "Distribute Keytabs",
-          clusterHostInfoJson,
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
@@ -2958,6 +2963,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -2999,12 +3006,13 @@
         cluster,
         requestStageContainer.getId(),
         "Disable security",
-        clusterHostInfoJson,
         StageUtils.getGson().toJson(commandParameters),
         hostParamsJson);
       addDisableSecurityCommandToAllServices(cluster, stage);
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3042,7 +3050,6 @@
         cluster,
         requestStageContainer.getId(),
         "Stopping ZooKeeper",
-        clusterHostInfoJson,
         StageUtils.getGson().toJson(commandParameters),
         hostParamsJson);
       for (ServiceComponent component : zookeeper.getServiceComponents().values()) {
@@ -3056,6 +3063,8 @@
       }
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3071,7 +3080,6 @@
           cluster,
           requestStageContainer.getId(),
           "Delete Keytabs",
-          clusterHostInfoJson,
           StageUtils.getGson().toJson(commandParameters),
           hostParamsJson);
 
@@ -3099,6 +3107,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3111,7 +3121,6 @@
           cluster,
           requestStageContainer.getId(),
           "Update Configurations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           UpdateKerberosConfigsServerAction.class,
@@ -3122,6 +3131,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3145,7 +3156,6 @@
           cluster,
           requestStageContainer.getId(),
           "Finalize Operations",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           FinalizeKerberosServerAction.class,
@@ -3155,6 +3165,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
 
@@ -3167,7 +3179,6 @@
           cluster,
           requestStageContainer.getId(),
           "Kerberization Clean Up",
-          clusterHostInfoJson,
           "{}",
           hostParamsJson,
           CleanupServerAction.class,
@@ -3178,6 +3189,8 @@
 
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
+
+      requestStageContainer.setClusterHostInfo(clusterHostInfoJson);
       requestStageContainer.addStages(roleGraph.getStages());
     }
   }
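The KerberosHelperImpl refactor above is mechanical but worth stating plainly: the cluster host info JSON used to be passed into every createNewStage/createServerActionStage call, and now each phase builds its stages without it and records the topology once on the RequestStageContainer after the role graph is built. A small sketch of the resulting pattern, using hypothetical stand-in types rather than the real Stage/RequestStageContainer classes:

import java.util.ArrayList;
import java.util.List;

// Sketch: host topology JSON attached once at the request level instead of
// being threaded through every stage. Types are illustrative stand-ins.
public class StagePatternSketch {
  static class Stage {
    final String context;
    Stage(String context) { this.context = context; }
  }

  static class RequestStageContainerSketch {
    private String clusterHostInfo;                  // set once per request
    private final List<Stage> stages = new ArrayList<>();

    void setClusterHostInfo(String json) { this.clusterHostInfo = json; }
    void addStages(List<Stage> newStages) { stages.addAll(newStages); }
    String getClusterHostInfo() { return clusterHostInfo; }
    int stageCount() { return stages.size(); }
  }

  public static void main(String[] args) {
    RequestStageContainerSketch request = new RequestStageContainerSketch();
    String clusterHostInfoJson = "{\"all_hosts\":[\"c6401\",\"c6402\"]}";

    // Each phase builds its stages without a host-info parameter...
    request.addStages(List.of(new Stage("Preparing Operations")));
    request.addStages(List.of(new Stage("Create Keytabs")));

    // ...and the topology is recorded once on the container.
    request.setClusterHostInfo(clusterHostInfoJson);
    System.out.println(request.stageCount() + " stages, hosts=" + request.getClusterHostInfo());
  }
}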
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/MaintenanceStateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/MaintenanceStateHelper.java
index f72f476..fd1d957 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/MaintenanceStateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/MaintenanceStateHelper.java
@@ -435,7 +435,7 @@
    * {@link MaintenanceStateHelper#isOperationAllowed(org.apache.ambari.server.controller.spi.Resource.Type, Service)}
    * methods.
    */
-  public static interface HostPredicate {
+  public interface HostPredicate {
     /**
      * Gets whether the specified host should not be included in a result set.
      *
@@ -445,6 +445,6 @@
      *         choice, {@code false} otherwise.
      * @throws AmbariException
      */
-    public boolean shouldHostBeRemoved(String hostname) throws AmbariException;
+    boolean shouldHostBeRemoved(String hostname) throws AmbariException;
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
index 5dfc148..ca9cf4c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestRequest.java
@@ -39,6 +39,8 @@
 
   private String abortReason;
 
+  private boolean removePendingHostRequests = false;
+
 
   @ApiModelProperty(name = "request_status", notes = "Only valid value is ABORTED.")
   public HostRoleStatus getStatus() {
@@ -76,6 +78,14 @@
     this.abortReason = abortReason;
   }
 
+  public boolean isRemovePendingHostRequests() {
+    return removePendingHostRequests;
+  }
+
+  public void setRemovePendingHostRequests(boolean removePendingHostRequests) {
+    this.removePendingHostRequests = removePendingHostRequests;
+  }
+
   @Override
   public String toString() {
     return "RequestRequest{" +
@@ -83,6 +93,7 @@
             ", requestId=" + requestId +
             ", status=" + status +
             ", abortReason='" + abortReason + '\'' +
+            ", removePendingHostRequests='" + removePendingHostRequests + '\'' +
             '}';
   }
 }
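The new removePendingHostRequests flag defaults to false and travels with the request-update payload, letting a caller who aborts a logical request also discard its still-queued host requests. A hypothetical usage sketch with a stand-in type that mirrors the fields added above (the real RequestRequest constructor and REST property names are not shown in this diff):

// Stand-in mirroring the diff's fields; not the real RequestRequest class.
public class AbortSketch {
  static class RequestUpdate {
    long requestId;
    String status = "ABORTED";
    String abortReason;
    boolean removePendingHostRequests = false;     // new flag, defaults to false

    @Override public String toString() {
      return "RequestUpdate{requestId=" + requestId
          + ", status=" + status
          + ", abortReason='" + abortReason + '\''
          + ", removePendingHostRequests=" + removePendingHostRequests + '}';
    }
  }

  public static void main(String[] args) {
    RequestUpdate update = new RequestUpdate();
    update.requestId = 42;
    update.abortReason = "Operator cancelled the request";
    update.removePendingHostRequests = true;       // also drop queued host requests
    System.out.println(update);
  }
}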
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
index 9dbda20..16f724f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
@@ -21,27 +21,22 @@
 
 import java.util.Map;
 
+import org.apache.ambari.server.state.RepositoryVersionState;
+
 public class ServiceComponentResponse {
 
   private Long clusterId; // REF
-
   private String clusterName; // REF
-
   private String serviceName;
-
   private String componentName;
-
   private String displayName;
-
   private String desiredStackVersion;
-
   private String desiredState;
-
   private String category;
-
-  Map<String, Integer> serviceComponentStateCount;
-
+  private Map<String, Integer> serviceComponentStateCount;
   private boolean recoveryEnabled;
+  private String desiredVersion;
+  private RepositoryVersionState repoState;
 
   public ServiceComponentResponse(Long clusterId, String clusterName,
                                   String serviceName,
@@ -50,8 +45,9 @@
                                   String desiredState,
                                   Map<String, Integer> serviceComponentStateCount,
                                   boolean recoveryEnabled,
-                                  String displayName) {
-    super();
+                                  String displayName,
+                                  String desiredVersion,
+                                  RepositoryVersionState repoState) {
     this.clusterId = clusterId;
     this.clusterName = clusterName;
     this.serviceName = serviceName;
@@ -61,6 +57,8 @@
     this.desiredState = desiredState;
     this.serviceComponentStateCount = serviceComponentStateCount;
     this.recoveryEnabled = recoveryEnabled;
+    this.desiredVersion = desiredVersion;
+    this.repoState = repoState;
   }
 
   /**
@@ -196,6 +194,21 @@
     this.recoveryEnabled = recoveryEnabled;
   }
 
+  /**
+   * @return the desired version of the component
+   */
+  public String getDesiredVersion() {
+    return desiredVersion;
+  }
+
+  /**
+   * @return the state of the repository against the desired version
+   */
+  public RepositoryVersionState getRepositoryState() {
+    return repoState;
+  }
+
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java
deleted file mode 100644
index 487ebff..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java
+++ /dev/null
@@ -1,434 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.utils.Closeables;
-
-/**
- * Defines the cluster created by gsInstaller.
- */
-public class ClusterDefinition {
-
-  private static final String CLUSTER_DEFINITION_FILE = "gsInstaller-hosts.txt";
-  private static final String DEFAULT_CLUSTER_NAME    = "ambari";
-  private static final String CLUSTER_NAME_TAG        = "CLUSTER=";
-  private static final String DEFAULT_VERSION_ID      = "HDP-1.2.0";
-  private static final String VERSION_ID_TAG          = "VERSION=";
-
-  private final Set<String> services = new HashSet<>();
-  private final Set<String> hosts = new HashSet<>();
-  private final Map<String, Set<String>> components = new HashMap<>();
-  private final Map<String, Map<String, Set<String>>> hostComponents = new HashMap<>();
-
-  private final GSInstallerStateProvider stateProvider;
-  private String clusterName;
-  private String versionId;
-
-  /**
-   * Index of host names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> hostStateMap = new HashMap<>();
-
-  /**
-   * Index of service names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> serviceStateMap = new HashMap<>();
-
-  /**
-   * Index of component names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> componentStateMap = new HashMap<>();
-
-  /**
-   * Index of host component names to host component state.
-   */
-  private final Map<String, HostComponentState> hostComponentStateMap = new HashMap<>();
-
-  /**
-   * Expiry for the health value.
-   */
-  private static final int DEFAULT_STATE_EXPIRY = 15000;
-
-  /**
-   * Component name mapping to account for differences in what is provided by the gsInstaller
-   * and what is expected by the Ambari providers.
-   */
-  private static final Map<String, String> componentNameMap = new HashMap<>();
-
-  static {
-    componentNameMap.put("GANGLIA", "GANGLIA_SERVER");
-  }
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a cluster definition.
-   *
-   * @param stateProvider  the state provider
-   */
-  public ClusterDefinition(GSInstallerStateProvider stateProvider) {
-    this(stateProvider, DEFAULT_STATE_EXPIRY);
-  }
-
-  /**
-   * Create a cluster definition.
-   *
-   * @param stateProvider  the state provider
-   * @param stateExpiry    the state expiry
-   */
-  public ClusterDefinition(GSInstallerStateProvider stateProvider, int stateExpiry) {
-    this.stateProvider = stateProvider;
-    this.clusterName   = DEFAULT_CLUSTER_NAME;
-    this.versionId     = DEFAULT_VERSION_ID;
-    readClusterDefinition();
-    setHostComponentState(stateExpiry);
-  }
-
-  // ----- ClusterDefinition -------------------------------------------------
-
-  /**
-   * Get the name of the cluster.
-   *
-   * @return the cluster name
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * Get the name of the cluster.
-   *
-   * @return the cluster name
-   */
-  public String getVersionId() {
-    return versionId;
-  }
-
-  /**
-   * Get the services for the cluster.
-   *
-   * @return the set of service names
-   */
-  public Set<String> getServices() {
-    return services;
-  }
-
-  /**
-   * Get the hosts for the cluster.
-   *
-   * @return the set of hosts names
-   */
-  public Set<String> getHosts() {
-    return hosts;
-  }
-
-  /**
-   * Get the components for the given service.
-   *
-   * @param service  the service name
-   *
-   * @return the set of component names for the given service name
-   */
-  public Set<String> getComponents(String service) {
-    return components.get(service);
-  }
-
-  /**
-   * Get the host components for the given service and host.
-   *
-   * @param service  the service name
-   * @param host     the host name
-   *
-   * @return the set of host component names for the given service and host names
-   */
-  public Set<String> getHostComponents(String service, String host) {
-    Set<String> resultSet = null;
-    Map<String, Set<String>> serviceHostComponents = hostComponents.get(service);
-    if (serviceHostComponents != null) {
-      resultSet = serviceHostComponents.get(host);
-    }
-    return resultSet == null ? Collections.<String>emptySet() : resultSet;
-  }
-
-  /**
-   * Get the host state from the given host name.
-   *
-   * @param hostName  the host name
-   *
-   * @return the host state
-   */
-  public String getHostState(String hostName) {
-    return isHealthy(hostStateMap.get(hostName)) ? "HEALTHY" : "INIT";
-  }
-
-  /**
-   * Get the service state from the given service name.
-   *
-   * @param serviceName  the service name
-   *
-   * @return the service state
-   */
-  public String getServiceState(String serviceName) {
-    return isHealthy(serviceStateMap.get(serviceName)) ? "STARTED" : "INIT";
-  }
-
-  /**
-   * Get the component state from the give service name and component name.
-   *
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the component state
-   */
-  public String getComponentState(String serviceName, String componentName) {
-    return isHealthy(componentStateMap.get(getComponentKey(serviceName, componentName))) ? "STARTED" : "INIT";
-  }
-
-  /**
-   * Get the host component name from the given host name, service name and component name.
-   *
-   * @param hostName       the host name
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the host component state
-   */
-  public String getHostComponentState(String hostName, String serviceName, String componentName) {
-    return isHealthy(hostComponentStateMap.get(getHostComponentKey(hostName, serviceName, componentName))) ? "STARTED" : "INIT";
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Read the gsInstaller cluster definition file.
-   */
-  private void readClusterDefinition() {
-    InputStream is = null;
-    try {
-      is = this.getClass().getClassLoader().getResourceAsStream(CLUSTER_DEFINITION_FILE);
-      BufferedReader br = new BufferedReader(new InputStreamReader(is));
-
-      String line;
-      while ((line = br.readLine()) != null) {
-        line = line.trim();
-        if (line.startsWith(CLUSTER_NAME_TAG)) {
-          clusterName = line.substring(CLUSTER_NAME_TAG.length());
-        }
-        else if (line.startsWith(VERSION_ID_TAG)) {
-          versionId = line.substring(VERSION_ID_TAG.length());
-        }
-        else {
-          String[] parts = line.split("\\s+");
-          assert(parts.length == 3);
-
-          String serviceName   = parts[0];
-          String componentName = parts[1];
-          String hostName      = parts[2];
-
-          // translate the component name if required
-          if (componentNameMap.containsKey(componentName)) {
-            componentName = componentNameMap.get(componentName);
-          }
-
-          services.add(serviceName);
-          Set<String> serviceComponents = components.get(serviceName);
-          if (serviceComponents == null) {
-            serviceComponents = new HashSet<>();
-            components.put(serviceName, serviceComponents);
-          }
-          serviceComponents.add(componentName);
-
-          Map<String, Set<String>> serviceHostComponents = hostComponents.get(serviceName);
-          if (serviceHostComponents == null) {
-            serviceHostComponents = new HashMap<>();
-            hostComponents.put(serviceName, serviceHostComponents);
-          }
-
-          Set<String> hostHostComponents = serviceHostComponents.get(hostName);
-          if (hostHostComponents == null) {
-            hostHostComponents = new HashSet<>();
-            serviceHostComponents.put(hostName, hostHostComponents);
-          }
-          hostHostComponents.add(componentName);
-          hosts.add(hostName);
-        }
-      }
-    } catch (IOException e) {
-      String msg = "Caught exception reading " + CLUSTER_DEFINITION_FILE + ".";
-      throw new IllegalStateException(msg, e);
-    } finally {
-      Closeables.closeSilently(is);
-    }
-  }
-
-  /**
-   * Set the host component state maps.
-   */
-  private void setHostComponentState(int stateExpiry) {
-    for (Map.Entry<String, Map<String, Set<String>>> serviceEntry : hostComponents.entrySet()) {
-      String serviceName = serviceEntry.getKey();
-
-      for (Map.Entry<String, Set<String>> hostEntry : serviceEntry.getValue().entrySet()) {
-        String hostName = hostEntry.getKey();
-
-        for (String componentName : hostEntry.getValue()) {
-
-          HostComponentState state = new HostComponentState(hostName, componentName, stateExpiry);
-
-          // add state to hosts
-          addState(hostName, hostStateMap, state);
-
-          // add state to services
-          addState(serviceName, serviceStateMap, state);
-
-          // add state to components
-          addState(getComponentKey(serviceName, componentName), componentStateMap, state);
-
-          // add state to host components
-          hostComponentStateMap.put(getHostComponentKey(hostName, serviceName, componentName), state);
-        }
-      }
-    }
-  }
-
-  /**
-   * Add the given host component state object to the given map of state objects.
-   *
-   * @param hostName  the host name
-   * @param stateMap  the map of state objects
-   * @param state     the state
-   */
-  private static void addState(String hostName, Map<String, Set<HostComponentState>> stateMap, HostComponentState state) {
-    Set<HostComponentState> states = stateMap.get(hostName);
-    if (states == null) {
-      states = new HashSet<>();
-      stateMap.put(hostName, states);
-    }
-    states.add(state);
-  }
-
-  /**
-   * Get a key from the given service name and component name.
-   *
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the key
-   */
-  private String getComponentKey(String serviceName, String componentName) {
-    return serviceName + "." + componentName;
-  }
-
-  /**
-   * Get a key from the given host name, service name and component name.
-   *
-   * @param hostName       the host name
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the key
-   */
-  private String getHostComponentKey(String hostName, String serviceName, String componentName) {
-    return hostName + "." + serviceName + "." + componentName;
-  }
-
-  /**
-   * Determine whether or not the host components associated
-   * with the given states are healthy.
-   *
-   * @param states  the states
-   *
-   * @return true if the associated host components are healthy
-   */
-  private boolean isHealthy(Set<HostComponentState> states) {
-    if (states != null) {
-      for (HostComponentState state : states) {
-        if (!state.isHealthy()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Determine whether or not the host component associated
-   * with the given state is healthy.
-   *
-   * @param state  the state
-   *
-   * @return true if the associated host component is healthy
-   */
-  private boolean isHealthy(HostComponentState state) {
-    return state == null || state.isHealthy();
-  }
-
-
-  // ----- inner classes -----------------------------------------------------
-
-  /**
-   * A state object used to check the health of a host component.
-   */
-  private class HostComponentState {
-    private final String hostName;
-    private final String componentName;
-    private final int expiry;
-    private boolean healthy = true;
-    private long lastAccess;
-
-    // ----- Constructor -----------------------------------------------------
-
-    /**
-     * Constructor.
-     *
-     * @param hostName       the host name
-     * @param componentName  the component name
-     */
-    HostComponentState(String hostName, String componentName, int expiry) {
-      this.hostName      = hostName;
-      this.componentName = componentName;
-      this.expiry        = expiry;
-    }
-
-    /**
-     * Determine whether or not the associated host component is healthy.
-     *
-     * @return true if the associated host component is healthy
-     */
-    public boolean isHealthy() {
-      if (System.currentTimeMillis() - lastAccess > expiry) {
-        // health value has expired... get it again
-        healthy = stateProvider.isHealthy(hostName, componentName);
-        this.lastAccess = System.currentTimeMillis();
-      }
-      return healthy;
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java
deleted file mode 100644
index 784f51c..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A cluster resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerClusterProvider extends GSInstallerResourceProvider{
-
-  // Clusters
-  protected static final String CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("Clusters", "cluster_name");
-  protected static final String CLUSTER_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("Clusters", "version");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerClusterProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Cluster, clusterDefinition);
-    initClusterResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    // Do nothing
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initClusterResources() {
-    Resource cluster = new ResourceImpl(Resource.Type.Cluster);
-    ClusterDefinition clusterDefinition = getClusterDefinition();
-    cluster.setProperty(CLUSTER_NAME_PROPERTY_ID, clusterDefinition.getClusterName());
-    cluster.setProperty(CLUSTER_VERSION_PROPERTY_ID, clusterDefinition.getVersionId());
-
-    addResource(cluster);
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java
deleted file mode 100644
index b0d953a..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A component resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerComponentProvider extends GSInstallerResourceProvider{
-
-  // Components
-  protected static final String COMPONENT_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name");
-  protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name");
-  protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID  = PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name");
-  protected static final String COMPONENT_STATE_PROPERTY_ID           = PropertyHelper.getPropertyId("ServiceComponentInfo", "state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerComponentProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Component, clusterDefinition);
-    initComponentResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, COMPONENT_STATE_PROPERTY_ID)) {
-      String serviceName   = (String) resource.getPropertyValue(COMPONENT_SERVICE_NAME_PROPERTY_ID);
-      String componentName = (String) resource.getPropertyValue(COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-      resource.setProperty(COMPONENT_STATE_PROPERTY_ID, getClusterDefinition().getComponentState(serviceName, componentName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initComponentResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-    for (String serviceName : services) {
-      Set<String> components = getClusterDefinition().getComponents(serviceName);
-      for (String componentName : components) {
-        Resource component = new ResourceImpl(Resource.Type.Component);
-        component.setProperty(COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
-        component.setProperty(COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
-        component.setProperty(COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
-
-        addResource(component);
-      }
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java
deleted file mode 100644
index d35c2ce..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A host component resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerHostComponentProvider extends GSInstallerResourceProvider{
-
-  // Host Components
-  protected static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  protected static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "service_name");
-  protected static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  protected static final String HOST_COMPONENT_STATE_PROPERTY_ID          = PropertyHelper.getPropertyId("HostRoles", "state");
-  protected static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID  = PropertyHelper.getPropertyId("HostRoles", "desired_state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerHostComponentProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.HostComponent, clusterDefinition);
-    initHostComponentResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, HOST_COMPONENT_STATE_PROPERTY_ID) ||
-        contains(propertyIds, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID)) {
-      String serviceName   = (String) resource.getPropertyValue(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID);
-      String componentName = (String) resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-      String hostName      = (String) resource.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
-
-      String hostComponentState = getClusterDefinition().getHostComponentState(hostName, serviceName, componentName);
-
-      resource.setProperty(HOST_COMPONENT_STATE_PROPERTY_ID, hostComponentState);
-      resource.setProperty(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, hostComponentState);
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initHostComponentResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-    for (String serviceName : services) {
-      Set<String> hosts = getClusterDefinition().getHosts();
-      for (String hostName : hosts) {
-        Set<String> hostComponents = getClusterDefinition().getHostComponents(serviceName, hostName);
-        for (String componentName : hostComponents) {
-          Resource hostComponent = new ResourceImpl(Resource.Type.HostComponent);
-          hostComponent.setProperty(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
-          hostComponent.setProperty(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
-          hostComponent.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
-          hostComponent.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostName);
-
-          addResource(hostComponent);
-        }
-      }
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java
deleted file mode 100644
index 5b69da4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A host resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerHostProvider extends GSInstallerResourceProvider{
-
-  // Hosts
-  protected static final String HOST_CLUSTER_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "cluster_name");
-  protected static final String HOST_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_name");
-  protected static final String HOST_STATE_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerHostProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Host, clusterDefinition);
-    initHostResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, HOST_STATE_PROPERTY_ID)) {
-      String hostName = (String) resource.getPropertyValue(HOST_NAME_PROPERTY_ID);
-      resource.setProperty(HOST_STATE_PROPERTY_ID, getClusterDefinition().getHostState(hostName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initHostResources() {
-    ClusterDefinition clusterDefinition = getClusterDefinition();
-    String            clusterName       = clusterDefinition.getClusterName();
-    Set<String>       hosts             = clusterDefinition.getHosts();
-
-    for (String hostName : hosts) {
-      Resource host = new ResourceImpl(Resource.Type.Host);
-      host.setProperty(HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-      host.setProperty(HOST_NAME_PROPERTY_ID, hostName);
-
-      addResource(host);
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java
deleted file mode 100644
index af7ab29..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * A NO-OP resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerNoOpProvider extends GSInstallerResourceProvider{
-
-  private final Map<Resource.Type, String> keyPropertyIds = new HashMap<>();
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    // Do nothing
-  }
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GSInstallerNoOpProvider(Resource.Type type, ClusterDefinition clusterDefinition) {
-    super(type, clusterDefinition);
-    keyPropertyIds.put(type, "id");
-  }
-
-
-  @Override
-  public Map<Resource.Type, String> getKeyPropertyIds() {
-    return keyPropertyIds;
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    return Collections.emptySet();
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java
deleted file mode 100644
index 018ae19..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.server.controller.internal.AbstractProviderModule;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-
-/**
- * A provider module implementation that uses the GSInstaller resource provider.
- */
-public class GSInstallerProviderModule extends AbstractProviderModule implements GSInstallerStateProvider{
-
-  private final ClusterDefinition clusterDefinition;
-
-  private static final Map<String, String> PORTS = new HashMap<>();
-
-  static {
-    PORTS.put("NAMENODE",           "50070");
-    PORTS.put("DATANODE",           "50075");
-    PORTS.put("JOBTRACKER",         "50030");
-    PORTS.put("TASKTRACKER",        "50060");
-    PORTS.put("HBASE_MASTER",       "60010");
-    PORTS.put("HBASE_REGIONSERVER", "60030");
-  }
-
-  private static final int TIMEOUT = 5000;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GSInstallerProviderModule() {
-    clusterDefinition = new ClusterDefinition(this);
-  }
-
-
-  // ----- GSInstallerStateProvider ------------------------------------------
-
-  @Override
-  public boolean isHealthy(String hostName, String componentName) {
-    String port = PORTS.get(componentName);
-    if (port != null) {
-      StringBuilder sb = new StringBuilder();
-      sb.append("http://").append(hostName);
-      sb.append(":").append(port);
-
-      try {
-        HttpURLConnection connection = (HttpURLConnection) new URL(sb.toString()).openConnection();
-
-        connection.setRequestMethod("HEAD");
-        connection.setConnectTimeout(TIMEOUT);
-        connection.setReadTimeout(TIMEOUT);
-
-        int code = connection.getResponseCode();
-
-        return code >= 200 && code <= 399;
-      } catch (IOException exception) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  @Override
-  protected ResourceProvider createResourceProvider(Resource.Type type) {
-    return GSInstallerResourceProvider.getResourceProvider(type, clusterDefinition);
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java
deleted file mode 100644
index 7b76cb9..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-
-/**
- * An abstract resource provider for a gsInstaller defined cluster.
- */
-public abstract class GSInstallerResourceProvider implements ResourceProvider {
-
-  private final ClusterDefinition clusterDefinition;
-
-  private final Set<Resource> resources = new HashSet<>();
-
-  private final Resource.Type type;
-
-  private final Set<String> propertyIds;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerResourceProvider(Resource.Type type, ClusterDefinition clusterDefinition) {
-    this.type              = type;
-    this.clusterDefinition = clusterDefinition;
-
-    Set<String> propertyIds = PropertyHelper.getPropertyIds(type);
-    this.propertyIds = new HashSet<>(propertyIds);
-    this.propertyIds.addAll(PropertyHelper.getCategories(propertyIds));
-  }
-
-
-  // ----- ResourceProvider --------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Set<Resource> resultSet = new HashSet<>();
-
-    for (Resource resource : resources) {
-      if (predicate == null || predicate.evaluate(resource)) {
-        ResourceImpl newResource = new ResourceImpl(resource);
-        updateProperties(newResource, request, predicate);
-        resultSet.add(newResource);
-      }
-    }
-    return resultSet;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public RequestStatus deleteResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public Map<Resource.Type, String> getKeyPropertyIds() {
-    return PropertyHelper.getKeyPropertyIds(type);
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = new HashSet<>(propertyIds);
-    propertyIds.removeAll(this.propertyIds);
-    return propertyIds;
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  /**
-   * Update the resource with any properties handled by the resource provider.
-   *
-   * @param resource   the resource to update
-   * @param request    the request
-   * @param predicate  the predicate
-   */
-  public abstract void updateProperties(Resource resource, Request request, Predicate predicate);
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the configuration provider.
-   *
-   * @return the configuration provider
-   */
-  protected ClusterDefinition getClusterDefinition() {
-    return clusterDefinition;
-  }
-
-  /**
-   * Get the resource provider type.
-   *
-   * @return the type
-   */
-  public Resource.Type getType() {
-    return type;
-  }
-
-
-// ----- helper methods ----------------------------------------------------
-
-  /**
-   * Get the set of property ids required to satisfy the given request.
-   *
-   * @param request              the request
-   * @param predicate            the predicate
-   *
-   * @return the set of property ids needed to satisfy the request
-   */
-  protected Set<String> getRequestPropertyIds(Request request, Predicate predicate) {
-    Set<String> propertyIds  = request.getPropertyIds();
-
-    // if no properties are specified, then return them all
-    if (propertyIds == null || propertyIds.isEmpty()) {
-      return new HashSet<>(this.propertyIds);
-    }
-
-    propertyIds = new HashSet<>(propertyIds);
-
-    if (predicate != null) {
-      propertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-    }
-    return propertyIds;
-  }
-
-  /**
-   * Check to see if the given set contains a property or category id that matches the given property id.
-   *
-   * @param ids         the set of property/category ids
-   * @param propertyId  the property id
-   *
-   * @return true if the given set contains a property id or category that matches the given property id
-   */
-  protected static boolean contains(Set<String> ids, String propertyId) {
-    boolean contains = ids.contains(propertyId);
-
-    if (!contains) {
-      String category = PropertyHelper.getPropertyCategory(propertyId);
-      while (category != null && !contains) {
-        contains = ids.contains(category);
-        category = PropertyHelper.getPropertyCategory(category);
-      }
-    }
-    return contains;
-  }
-
-  /**
-  * Add a resource to the set of resources provided by this provider.
-  *
-  * @param resource  the resource to add
-  */
-  protected void addResource(Resource resource) {
-    resources.add(resource);
-  }
-
-  /**
-   * Factory method for obtaining a resource provider based on a given type.
-   *
-   * @param type               the resource type
-   * @param clusterDefinition  the cluster definition
-   *
-   * @return a new resource provider
-   */
-  public static ResourceProvider getResourceProvider(Resource.Type type,
-                                                     ClusterDefinition clusterDefinition) {
-    switch (type.getInternalType()) {
-      case Cluster:
-        return new GSInstallerClusterProvider(clusterDefinition);
-      case Service:
-        return new GSInstallerServiceProvider(clusterDefinition);
-      case Component:
-        return new GSInstallerComponentProvider(clusterDefinition);
-      case Host:
-        return new GSInstallerHostProvider(clusterDefinition);
-      case HostComponent:
-        return new GSInstallerHostComponentProvider(clusterDefinition);
-      default:
-        return new GSInstallerNoOpProvider(type, clusterDefinition);
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java
deleted file mode 100644
index 2438273..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A service resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerServiceProvider extends GSInstallerResourceProvider{
-
-  // Services
-  protected static final String SERVICE_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
-  protected static final String SERVICE_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
-  protected static final String SERVICE_SERVICE_STATE_PROPERTY_ID   = PropertyHelper.getPropertyId("ServiceInfo", "state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerServiceProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Service, clusterDefinition);
-    initServiceResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, SERVICE_SERVICE_STATE_PROPERTY_ID)) {
-      String serviceName = (String) resource.getPropertyValue(SERVICE_SERVICE_NAME_PROPERTY_ID);
-      resource.setProperty(SERVICE_SERVICE_STATE_PROPERTY_ID, getClusterDefinition().getServiceState(serviceName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initServiceResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-
-    for (String serviceName : services) {
-      Resource service = new ResourceImpl(Resource.Type.Service);
-      service.setProperty(SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-      service.setProperty(SERVICE_SERVICE_NAME_PROPERTY_ID, serviceName);
-
-      addResource(service);
-    }
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java
deleted file mode 100644
index aef907c..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-/**
- * Interface to provide component state to the gsInstaller resource provider.
- */
-public interface GSInstallerStateProvider {
-  /**
-   * Determine whether or not the host component identified by the given host name
-   * and component name is healthy.
-   *
-   * @param hostName       the host name
-   * @param componentName  the component name
-   *
-   * @return true if the host component is healthy
-   */
-  public boolean isHealthy(String hostName, String componentName);
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index 27ac03e..6c4e096 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -915,7 +915,7 @@
    * @param clusterName
    *          the cluster name
    * @param configType
-   *          the configuration type (for example {@value hdfs-site}).
+   *          the configuration type (for example <code>hdfs-site</code>).
    * @return
    */
   private String getDesiredConfigVersion(String clusterName,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java
index 01cf79a..8f23484 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java
@@ -500,6 +500,6 @@
      *
      * @throws AmbariException thrown if a problem occurred during invocation
      */
-    public T invoke() throws AmbariException, AuthorizationException;
+    T invoke() throws AmbariException, AuthorizationException;
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ArtifactResourceProvider.java
index cb8c7bb..c5c02cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ArtifactResourceProvider.java
@@ -548,7 +548,7 @@
   /**
    * Used to register a dynamic sub-resource with an existing resource type.
    */
-  public static interface TypeRegistration {
+  public interface TypeRegistration {
     /**
      * Allows the management controller to be set on the registration.
      * This is called as part of the registration process.
@@ -557,14 +557,14 @@
      *
      * @param  controller  management controller
      */
-    public void setManagementController(AmbariManagementController controller);
+    void setManagementController(AmbariManagementController controller);
 
     /**
      * Get the type of the registering resource.
      *
      * @return type of the register resource
      */
-    public Resource.Type getType();
+    Resource.Type getType();
 
     /**
      * Full foreign key property name to use in the artifact resource.
@@ -574,7 +574,7 @@
      *          For example: "Artifacts/cluster_name
      */
     //todo: use relative property names
-    public String getFKPropertyName();
+    String getFKPropertyName();
 
     /**
      * Shortened foreign key name that is written to the database.
@@ -583,7 +583,7 @@
      *
      * @return short fk name.  For example: "cluster_name"
      */
-    public String getShortFKPropertyName();
+    String getShortFKPropertyName();
 
     /**
      * Convert the foreign key value to a value that is persisted to the database.
@@ -603,7 +603,7 @@
      *
      * @throws AmbariException if unable to convert the value
      */
-    public String toPersistId(String value) throws AmbariException;
+    String toPersistId(String value) throws AmbariException;
 
     /**
      * Convert the persist id form of the foreign key which is written to the database
@@ -623,7 +623,7 @@
      *
      * @throws AmbariException if unable to convert the value
      */
-    public String fromPersistId(String value) throws AmbariException;
+    String fromPersistId(String value) throws AmbariException;
 
     /**
      * Get a map of ancestor type to foreign key.
@@ -635,7 +635,7 @@
      * @return map of ancestor type to foreign key
      */
     //todo: look at the need to use the same name as specified by ancestors
-    public Map<Resource.Type, String> getForeignKeyInfo();
+    Map<Resource.Type, String> getForeignKeyInfo();
 
     /**
      * Determine if the instance identified by the provided properties exists.
@@ -647,8 +647,8 @@
      *
      * @throws AmbariException  an exception occurs trying to determine if the instance exists
      */
-    public boolean instanceExists(Map<Resource.Type, String> keyMap,
-                                  Map<String, Object> properties) throws AmbariException;
+    boolean instanceExists(Map<Resource.Type, String> keyMap,
+                           Map<String, Object> properties) throws AmbariException;
   }
 
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 88a8f80..1a2947b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -34,6 +34,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
@@ -155,8 +157,10 @@
    * This will initially be used to filter out the Ranger Passwords, but
    * could be extended in the future for more generic purposes.
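+   *
+   * @param authToLocalPerClusterMap per-cluster set of Kerberos auth-to-local
+   *          property names (as "configType/propertyName") to exclude from the export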
    */
-  private static final PropertyFilter[] exportPropertyFilters =
-    { new PasswordPropertyFilter(),
+  private PropertyFilter[] getExportPropertyFilters(
+      Map<Long, Set<String>> authToLocalPerClusterMap) {
+    return new PropertyFilter[] {
+      new PasswordPropertyFilter(),
       new SimplePropertyNameExportFilter("tez.tez-ui.history-url.base", "tez-site"),
       new SimplePropertyNameExportFilter("admin_server_host", "kerberos-env"),
       new SimplePropertyNameExportFilter("kdc_hosts", "kerberos-env"),
@@ -168,8 +172,9 @@
       new SimplePropertyNameExportFilter("domains", "krb5-conf"),
       new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active", "hadoop-env"),
       new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby", "hadoop-env"),
-      new StackPropertyTypeFilter()
-    };
+      new StackPropertyTypeFilter(),
+      new KerberosAuthToLocalRulesFilter(authToLocalPerClusterMap)
+    };
+  }
 
   /**
    * Statically-defined list of filters to apply on cluster config
@@ -408,7 +413,10 @@
       }
     }
 
-    setMissingConfigurations(clusterConfig, configTypesUpdated);
+    // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setRetryConfiguration(clusterConfig, configTypesUpdated);
+    setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
+    addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
 
     trimProperties(clusterConfig, clusterTopology);
 
@@ -515,6 +523,16 @@
    */
   private void doFilterPriorToExport(Configuration configuration) {
     Map<String, Map<String, String>> properties = configuration.getFullProperties();
+    Map<Long, Set<String>> authToLocalPerClusterMap = null;
+    try {
+      String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
+      Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
+      authToLocalPerClusterMap = new HashMap<>();
+      authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster).getAllAuthToLocalProperties());
+    } catch (AmbariException e) {
+      LOG.error("Error while getting authToLocal properties.", e);
+    }
+    PropertyFilter[] exportPropertyFilters = getExportPropertyFilters(authToLocalPerClusterMap);
     for (Map.Entry<String, Map<String, String>> configEntry : properties.entrySet()) {
       String type = configEntry.getKey();
       try {
@@ -531,7 +549,7 @@
       for (Map.Entry<String, String> propertyEntry : typeProperties.entrySet()) {
         String propertyName = propertyEntry.getKey();
         String propertyValue = propertyEntry.getValue();
-        if (shouldPropertyBeExcludedForBlueprintExport(propertyName, propertyValue, type, clusterTopology)) {
+        if (shouldPropertyBeExcludedForBlueprintExport(propertyName, propertyValue, type, clusterTopology, exportPropertyFilters)) {
           configuration.removeProperty(type, propertyName);
         }
       }
@@ -1029,7 +1047,7 @@
    * @return true if the property should be excluded
    *         false if the property should not be excluded
    */
-  private static boolean shouldPropertyBeExcludedForBlueprintExport(String propertyName, String propertyValue, String propertyType, ClusterTopology topology) {
+  private boolean shouldPropertyBeExcludedForBlueprintExport(String propertyName, String propertyValue, String propertyType, ClusterTopology topology, PropertyFilter[] exportPropertyFilters) {
     for(PropertyFilter filter : exportPropertyFilters) {
       if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology)) {
         return true;
@@ -2778,59 +2796,52 @@
     });
   }
 
-  /**
-   * Explicitly set any properties that are required but not currently provided in the stack definition.
-   *
-   * @param configuration  configuration where properties are to be added
-   */
-  void setMissingConfigurations(Configuration configuration, Set<String> configTypesUpdated) {
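+  /**
+   * Ensure that the hadoop.proxyuser.*.hosts/groups properties are present in
+   * core-site for the service users (Oozie, Hive/WebHCat, HBase, Falcon) when
+   * HDFS is part of the blueprint.
+   *
+   * @return the collection of services declared in the blueprint
+   */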
+  private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<String> configTypesUpdated) {
     // AMBARI-5206
     final Map<String, String> userProps = new HashMap<>();
 
-    setRetryConfiguration(configuration, configTypesUpdated);
-
     Collection<String> services = clusterTopology.getBlueprint().getServices();
-    // only add user properties to the map for
-    // services actually included in the blueprint definition
-    if (services.contains("OOZIE")) {
-      userProps.put("oozie_user", "oozie-env");
-    }
-
-    if (services.contains("HIVE")) {
-      userProps.put("hive_user", "hive-env");
-      userProps.put("webhcat_user", "hive-env");
-    }
-
-    if (services.contains("HBASE")) {
-      userProps.put("hbase_user", "hbase-env");
-    }
-
-    if (services.contains("FALCON")) {
-      userProps.put("falcon_user", "falcon-env");
-    }
-
-    String proxyUserHosts  = "hadoop.proxyuser.%s.hosts";
-    String proxyUserGroups = "hadoop.proxyuser.%s.groups";
-
-    Map<String, Map<String, String>> existingProperties = configuration.getFullProperties();
-    for (String property : userProps.keySet()) {
-      String configType = userProps.get(property);
-      Map<String, String> configs = existingProperties.get(configType);
-      if (configs != null) {
-        String user = configs.get(property);
-        if (user != null && !user.isEmpty()) {
-          ensureProperty(configuration, "core-site", String.format(proxyUserHosts, user), "*", configTypesUpdated);
-          ensureProperty(configuration, "core-site", String.format(proxyUserGroups, user), "*", configTypesUpdated);
-        }
-      } else {
-        LOG.debug("setMissingConfigurations: no user configuration found for type = " + configType +
-                  ".  This may be caused by an error in the blueprint configuration.");
+    if (services.contains("HDFS")) {
+      // only add user properties to the map for
+      // services actually included in the blueprint definition
+      if (services.contains("OOZIE")) {
+        userProps.put("oozie_user", "oozie-env");
       }
 
+      if (services.contains("HIVE")) {
+        userProps.put("hive_user", "hive-env");
+        userProps.put("webhcat_user", "hive-env");
+      }
+
+      if (services.contains("HBASE")) {
+        userProps.put("hbase_user", "hbase-env");
+      }
+
+      if (services.contains("FALCON")) {
+        userProps.put("falcon_user", "falcon-env");
+      }
+
+      String proxyUserHosts = "hadoop.proxyuser.%s.hosts";
+      String proxyUserGroups = "hadoop.proxyuser.%s.groups";
+
+      Map<String, Map<String, String>> existingProperties = configuration.getFullProperties();
+      for (String property : userProps.keySet()) {
+        String configType = userProps.get(property);
+        Map<String, String> configs = existingProperties.get(configType);
+        if (configs != null) {
+          String user = configs.get(property);
+          if (user != null && !user.isEmpty()) {
+            ensureProperty(configuration, "core-site", String.format(proxyUserHosts, user), "*", configTypesUpdated);
+            ensureProperty(configuration, "core-site", String.format(proxyUserGroups, user), "*", configTypesUpdated);
+          }
+        } else {
+          LOG.debug("setMissingConfigurations: no user configuration found for type = " + configType +
+                  ".  This may be caused by an error in the blueprint configuration.");
+        }
+
+      }
     }
-
-    addExcludedConfigProperties(configuration, configTypesUpdated, services, clusterTopology.getBlueprint().getStack());
-
+    return services;
   }
 
   /**
@@ -2840,10 +2851,11 @@
    * In case the excluded config-type related service is not present in the blueprint, excluded configs are ignored
    * @param configuration
    * @param configTypesUpdated
-   * @param blueprintServices
    * @param stack
    */
-  private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated, Collection<String> blueprintServices, Stack stack) {
+  private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated, Stack stack) {
+    Collection<String> blueprintServices = clusterTopology.getBlueprint().getServices();
+
     LOG.debug("Handling excluded properties for blueprint services: {}", blueprintServices);
 
     for (String blueprintService : blueprintServices) {
@@ -2945,7 +2957,7 @@
    * if a given property should be included in an external
    * collection of properties.
    */
-  private static interface PropertyFilter {
+  private interface PropertyFilter {
 
     /**
      * Query to determine if a given property should be included in a collection of
@@ -3026,6 +3038,35 @@
   }
 
   /**
+   * A Filter that excludes Kerberos auth_to_local rules properties.
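+   * A property is excluded when its "configType/propertyName" pair appears in
+   * the auth-to-local set captured for the topology's cluster id.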
+   */
+  private static class KerberosAuthToLocalRulesFilter implements PropertyFilter {
+
+    private final Map<Long, Set<String>> authToLocalPerClusterMap;
+
+    KerberosAuthToLocalRulesFilter(Map<Long, Set<String>> authToLocalPerClusterMap) {
+      this.authToLocalPerClusterMap = authToLocalPerClusterMap;
+    }
+
+    /**
+     * Query to determine if a given property should be included in a collection of
+     * properties.
+     *
+     * This implementation filters Kerberos auth_to_local rules properties.
+     *
+     * @param propertyName property name
+     * @param propertyValue property value
+     * @param configType config type that contains this property
+     * @param topology cluster topology instance
+     *
+     * @return true if the property should be included
+     *         false if the property should not be included
+     */
+    @Override
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      return (authToLocalPerClusterMap == null || authToLocalPerClusterMap.get(topology.getClusterId()) == null || !authToLocalPerClusterMap.get(topology.getClusterId()).contains(String.format("%s/%s", configType, propertyName)));
+    }
+  }
+
+  /**
    * Simple filter implementation used to remove named properties from
    * a Blueprint export.  Some properties with hostname information set
    * by the UI do not have straightforward mappings to hosts, so these properties
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index e42bd45..4ad01a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -585,28 +585,27 @@
     throws SystemException {
     processExecutor.shutdown();
     try {
-      if (!processExecutor.awaitTermination(timeout, TimeUnit.MILLISECONDS)) {
-        processExecutor.shutdownNow();
-        for (CommandLineThreadWrapper commandLineThreadWrapper: pythonCmdThreads) {
-          CommandLineThread commandLineThread = commandLineThreadWrapper.getCommandLineThread();
-          try {
-            Integer returnCode = commandLineThread.getReturnCode();
-            if (returnCode == null) {
-              throw new TimeoutException();
-            } else if (returnCode != 0) {
-              throw new ExecutionException(String.format("Execution of \"%s\" returned %d.", commandLineThreadWrapper.getCommandLine(), returnCode),
-                new Throwable(commandLineThreadWrapper.getLogStream().getOutput()));
-            }
-          } catch (TimeoutException e) {
-            LOG.error("Generate client configs script was killed due to timeout ", e);
-            throw new SystemException("Generate client configs script was killed due to timeout ", e);
-          } catch (ExecutionException e) {
-            LOG.error(e.getMessage(), e);
-            throw new SystemException(e.getMessage() + " " + e.getCause());
-          } finally {
-            commandLineThreadWrapper.getProcess().destroy();
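+      // wait for completion, then always inspect each python command's return
+      // code; previously the threads were only checked when the executor timed out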
+      processExecutor.awaitTermination(timeout, TimeUnit.MILLISECONDS);
+      processExecutor.shutdownNow();
+      for (CommandLineThreadWrapper commandLineThreadWrapper: pythonCmdThreads) {
+        CommandLineThread commandLineThread = commandLineThreadWrapper.getCommandLineThread();
+        try {
+          Integer returnCode = commandLineThread.getReturnCode();
+          if (returnCode == null) {
+            throw new TimeoutException();
+          } else if (returnCode != 0) {
+            throw new ExecutionException(String.format("Execution of \"%s\" returned %d.", commandLineThreadWrapper.getCommandLine(), returnCode),
+              new Throwable(commandLineThreadWrapper.getLogStream().getOutput()));
           }
-        }  
+        } catch (TimeoutException e) {
+          LOG.error("Generate client configs script was killed due to timeout ", e);
+          throw new SystemException("Generate client configs script was killed due to timeout ", e);
+        } catch (ExecutionException e) {
+          LOG.error(e.getMessage(), e);
+          throw new SystemException(e.getMessage() + " " + e.getCause());
+        } finally {
+          commandLineThreadWrapper.getProcess().destroy();
+        }
       }
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 92e72ed..f8016a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -20,7 +20,6 @@
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
 
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -77,7 +76,7 @@
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceInfo;
@@ -90,6 +89,7 @@
 import org.apache.commons.lang.math.NumberUtils;
 
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.google.gson.Gson;
 import com.google.inject.Inject;
@@ -349,10 +349,10 @@
       stackId = currentStackVersion;
     }
 
-    RepositoryVersionEntity repoVersionEnt = repositoryVersionDAO.findByStackAndVersion(
+    RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
         stackId, desiredRepoVersion);
 
-    if (repoVersionEnt == null) {
+    if (repoVersionEntity == null) {
       throw new IllegalArgumentException(String.format(
               "Repo version %s is not available for stack %s",
               desiredRepoVersion, stackId));
@@ -360,14 +360,40 @@
 
     VersionDefinitionXml desiredVersionDefinition = null;
     try {
-      desiredVersionDefinition = repoVersionEnt.getRepositoryXml();
+      desiredVersionDefinition = repoVersionEntity.getRepositoryXml();
     } catch (Exception e) {
       throw new IllegalArgumentException(
           String.format("Version %s is backed by a version definition, but it could not be parsed", desiredRepoVersion), e);
     }
 
-    // get all of the host eligible for stack distribution
-    List<Host> hosts = getHostsForStackDistribution(cluster);
+    // if true, then we need to force all new host versions into the INSTALLED state
+    boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
+        CLUSTER_STACK_VERSION_FORCE));
+
+    try {
+      // either create the necessary host version entries, or set them to INSTALLING when attempting to re-distribute an existing version
+      return createOrUpdateHostVersions(cluster, repoVersionEntity, desiredVersionDefinition,
+          stackId, forceInstalled, propertyMap);
+    } catch (AmbariException e) {
+      throw new SystemException("Can not persist request", e);
+    }
+  }
+
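+  /**
+   * Creates or updates the cluster and host version entities for the given
+   * repository, then builds the install orchestration unless the version is
+   * being force-installed.
+   */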
+  @Transactional
+  RequestStatus createOrUpdateHostVersions(Cluster cluster,
+      RepositoryVersionEntity repoVersionEntity, VersionDefinitionXml versionDefinitionXml,
+      StackId stackId, boolean forceInstalled, Map<String, Object> propertyMap)
+      throws AmbariException, SystemException {
+
+    final String clusterName = cluster.getClusterName();
+    final String authName = getManagementController().getAuthName();
+    final String desiredRepoVersion = repoVersionEntity.getVersion();
+
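+    // look up any existing cluster version entity so we can later decide
+    // between creating a new one and retrying an existing installation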
+    ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(
+        clusterName, stackId, desiredRepoVersion);
+
+    // get all of the hosts eligible for stack distribution
+    List<Host> hosts = Lists.newArrayList(cluster.getHosts());
 
     /*
     If there is a repository that is already ATTEMPTED to be installed and the version
@@ -383,7 +409,7 @@
     install by name.  if the package-version is not known, then the 'newest' is ALWAYS installed.
     In this case, 2.5.0.0.  2.4 is never picked up.
     */
-    for (ClusterVersionEntity clusterVersion : clusterVersionDAO.findByCluster(clName)) {
+    for (ClusterVersionEntity clusterVersion : clusterVersionDAO.findByCluster(cluster.getClusterName())) {
       RepositoryVersionEntity clusterRepoVersion = clusterVersion.getRepositoryVersion();
 
       int compare = compareVersions(clusterRepoVersion.getVersion(), desiredRepoVersion);
@@ -396,18 +422,18 @@
       // !!! the version is greater to the one to install
 
       // if the stacks are different, then don't fail (further check same-stack version strings)
-      if (!StringUtils.equals(clusterRepoVersion.getStackName(), repoVersionEnt.getStackName())) {
+      if (!StringUtils.equals(clusterRepoVersion.getStackName(), repoVersionEntity.getStackName())) {
         continue;
       }
 
       // if there is no backing VDF for the desired version, allow the operation (legacy behavior)
-      if (null == desiredVersionDefinition) {
+      if (null == versionDefinitionXml) {
         continue;
       }
 
       // backing VDF does not define the package version for any of the hosts, cannot install (allows a VDF with package-version)
       for (Host host : hosts) {
-        if (StringUtils.isBlank(desiredVersionDefinition.getPackageVersion(host.getOsFamily()))) {
+        if (StringUtils.isBlank(versionDefinitionXml.getPackageVersion(host.getOsFamily()))) {
           String msg = String.format("Ambari cannot install version %s.  Version %s is already installed.",
             desiredRepoVersion, clusterRepoVersion.getVersion());
           throw new IllegalArgumentException(msg);
@@ -415,49 +441,17 @@
       }
     }
 
-    // if true, then we need to force all new host versions into the INSTALLED state
-    boolean forceInstalled = Boolean.parseBoolean((String)propertyMap.get(
-        CLUSTER_STACK_VERSION_FORCE));
-
-    final RequestStatusResponse response;
-
-    try {
-      if (forceInstalled) {
-        createHostVersions(cluster, hosts, stackId, desiredRepoVersion, RepositoryVersionState.INSTALLED);
-        response = null;
-      } else {
-        createHostVersions(cluster, hosts, stackId, desiredRepoVersion,
-            RepositoryVersionState.INSTALLING);
-
-        RequestStageContainer installRequest = createOrchestration(cluster, stackId, hosts,
-            repoVersionEnt, propertyMap);
-
-        response = installRequest.getRequestStatusResponse();
-      }
-    } catch (AmbariException e) {
-      throw new SystemException("Can not persist request", e);
+    RepositoryVersionState repositoryVersionState = RepositoryVersionState.INSTALLING;
+    if (forceInstalled) {
+      repositoryVersionState = RepositoryVersionState.INSTALLED;
     }
 
-    return getRequestStatus(response);
-  }
-
-  @Transactional
-  void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
-      String desiredRepoVersion, RepositoryVersionState repoState)
-      throws AmbariException, SystemException {
-    final String clusterName = cluster.getClusterName();
-    final String authName = getManagementController().getAuthName();
-
-    ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(
-        clusterName, stackId, desiredRepoVersion);
-
+    // if there is no cluster version entity, then create one
     if (clusterVersionEntity == null) {
       try {
         // Create/persist new cluster stack version
-        cluster.createClusterVersion(stackId, desiredRepoVersion, authName, repoState);
-
-        clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(clusterName,
-            stackId, desiredRepoVersion);
+        clusterVersionEntity = cluster.createClusterVersion(stackId, desiredRepoVersion, authName,
+            repositoryVersionState);
       } catch (AmbariException e) {
         throw new SystemException(
             String.format("Can not create cluster stack version %s for cluster %s",
@@ -465,25 +459,27 @@
       }
     } else {
       // Move cluster version into the specified state (retry installation)
-      cluster.transitionClusterVersion(stackId, desiredRepoVersion, repoState);
+      cluster.transitionClusterVersion(stackId, desiredRepoVersion, repositoryVersionState);
     }
 
-    // Will also initialize all Host Versions to the specified state state.
-    cluster.transitionHosts(clusterVersionEntity, repoState);
+    // the cluster will create/update all of the host versions to the correct state
+    List<Host> hostsNeedingInstallCommands = cluster.transitionHostsToInstalling(
+        clusterVersionEntity, repoVersionEntity, versionDefinitionXml, forceInstalled);
 
-    // Directly transition host versions to NOT_REQUIRED for hosts that don't
-    // have versionable components
-    for (Host host : hosts) {
-      if (!host.hasComponentsAdvertisingVersions(stackId)) {
-        transitionHostVersionToNotRequired(host, cluster,
-            clusterVersionEntity.getRepositoryVersion());
-      }
+    RequestStatusResponse response = null;
+    if (!forceInstalled) {
+      RequestStageContainer installRequest = createOrchestration(cluster, stackId,
+          hostsNeedingInstallCommands, repoVersionEntity, versionDefinitionXml, propertyMap);
+
+      response = installRequest.getRequestStatusResponse();
     }
+
+    return getRequestStatus(response);
   }
 
   @Transactional
   RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
-      List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
+      List<Host> hosts, RepositoryVersionEntity repoVersionEnt, VersionDefinitionXml desiredVersionDefinition, Map<String, Object> propertyMap)
       throws AmbariException, SystemException {
     final AmbariManagementController managementController = getManagementController();
     final AmbariMetaInfo ami = managementController.getAmbariMetaInfo();
@@ -546,7 +542,7 @@
       }
 
       Stage stage = stageFactory.createNew(req.getId(), "/tmp/ambari", cluster.getClusterName(),
-          cluster.getClusterId(), stageName, clusterHostInfoJson, "{}", hostParamsJson);
+          cluster.getClusterId(), stageName, "{}", hostParamsJson);
 
       // if you have 1000 hosts (10 stages with 100 installs), we want to ensure
       // that a single failure doesn't cause all other stages to abort; set the
@@ -564,8 +560,11 @@
       // determine services for the repo
       Set<String> serviceNames = new HashSet<>();
 
-      // !!! TODO for patch upgrades, we need to limit the serviceNames to those
-      // that are detailed for the repository
+      // !!! limit the serviceNames to those that are detailed for the repository.
+      // TODO packages don't have component granularity
+      if (RepositoryType.STANDARD != repoVersionEnt.getType()) {
+        serviceNames.addAll(desiredVersionDefinition.getAvailableServiceNames());
+      }
 
       // Populate with commands for host
       for (int i = 0; i < maxTasks && hostIterator.hasNext(); i++) {
@@ -591,6 +590,7 @@
               repoVersionEnt.getDisplayName()));
     }
 
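+    // the cluster host info is now attached once to the request instead of to each stage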
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(stages);
     req.persist();
 
@@ -610,6 +610,9 @@
         osFamily, repoVersion.getVersion(), stackId));
     }
 
+    if (repoInfo.isEmpty()) {
+      LOG.error(String.format("Repository list is empty. Ambari may not be managing the repositories for %s", osFamily));
+    }
 
     // determine packages for all services that are installed on host
     List<ServiceOsSpecific.Package> packages = new ArrayList<>();
@@ -624,6 +627,7 @@
     if (servicesOnHost.isEmpty()) {
       return null;
     }
+
     List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
     for (String serviceName : servicesOnHost) {
       try{
@@ -902,29 +906,6 @@
   }
 
   /**
-   * Gets all of the hosts in a cluster which are not in "maintenance mode" and
-   * are considered to be healthy. In the case of stack distribution, a host
-   * must be explicitely marked as being in maintenance mode for it to be
-   * considered as unhealthy.
-   *
-   * @param cluster
-   *          the cluster (not {@code null}).
-   * @return the list of hosts that are not in maintenance mode and are
-   *         elidgable to have a stack distributed to them.
-   */
-  private List<Host> getHostsForStackDistribution(Cluster cluster) {
-    Collection<Host> hosts = cluster.getHosts();
-    List<Host> healthyHosts = new ArrayList<>(hosts.size());
-    for (Host host : hosts) {
-      if (host.getMaintenanceState(cluster.getClusterId()) == MaintenanceState.OFF) {
-        healthyHosts.add(host);
-      }
-    }
-
-    return healthyHosts;
-  }
-
-  /**
    * Updates the version states.  Transactional to ensure only one transaction for all updates
    * @param clusterId the cluster
    * @param current   the repository that is current for the cluster
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index 65cfcaa..ff8d0be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -18,7 +18,6 @@
 package org.apache.ambari.server.controller.internal;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -64,6 +63,7 @@
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 
+import com.google.common.collect.Sets;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 import com.google.inject.persist.Transactional;
@@ -89,17 +89,18 @@
   protected static final String COMPONENT_UNKNOWN_COUNT_PROPERTY_ID   = "ServiceComponentInfo/unknown_count";
   protected static final String COMPONENT_INSTALL_FAILED_COUNT_PROPERTY_ID = "ServiceComponentInfo/install_failed_count";
   protected static final String COMPONENT_RECOVERY_ENABLED_ID         = "ServiceComponentInfo/recovery_enabled";
+  protected static final String COMPONENT_DESIRED_VERSION             = "ServiceComponentInfo/desired_version";
+  protected static final String COMPONENT_REPOSITORY_STATE            = "ServiceComponentInfo/repository_state";
 
   private static final String TRUE = "true";
 
   //Parameters from the predicate
   private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
 
-  private static Set<String> pkPropertyIds =
-      new HashSet<>(Arrays.asList(new String[]{
+  private static Set<String> pkPropertyIds = Sets.newHashSet(
           COMPONENT_CLUSTER_NAME_PROPERTY_ID,
           COMPONENT_SERVICE_NAME_PROPERTY_ID,
-          COMPONENT_COMPONENT_NAME_PROPERTY_ID}));
+          COMPONENT_COMPONENT_NAME_PROPERTY_ID);
 
   private MaintenanceStateHelper maintenanceStateHelper;
 
@@ -188,6 +189,8 @@
       setResourceProperty(resource, COMPONENT_INIT_COUNT_PROPERTY_ID, response.getServiceComponentStateCount().get("initCount"), requestedIds);
       setResourceProperty(resource, COMPONENT_UNKNOWN_COUNT_PROPERTY_ID, response.getServiceComponentStateCount().get("unknownCount"), requestedIds);
       setResourceProperty(resource, COMPONENT_RECOVERY_ENABLED_ID, String.valueOf(response.isRecoveryEnabled()), requestedIds);
+      setResourceProperty(resource, COMPONENT_DESIRED_VERSION, response.getDesiredVersion(), requestedIds);
+      setResourceProperty(resource, COMPONENT_REPOSITORY_STATE, response.getRepositoryState(), requestedIds);
 
       resources.add(resource);
     }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
index f24c138..19d9141 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
@@ -47,7 +47,6 @@
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.TopologyRequest;
-import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -114,11 +113,6 @@
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return Collections.emptyList();
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Export Command For Cluster '%s'", clusterName);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index 747aac3..5579792 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -633,7 +633,7 @@
     // retrieve the cluster desired configs once instead of per host
     Map<String, DesiredConfig> desiredConfigs = null;
     if (null != cluster) {
-      cluster.getDesiredConfigs();
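+      // capture the result; the return value was previously discarded,
+      // leaving desiredConfigs null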
+      desiredConfigs = cluster.getDesiredConfigs();
     }
 
     for (Host h : hosts) {
@@ -664,7 +664,7 @@
         if (clustersForHost != null && clustersForHost.size() != 0) {
           Cluster clusterForHost = clustersForHost.iterator().next();
           r.setClusterName(clusterForHost.getClusterName());
-          r.setDesiredHostConfigs(h.getDesiredHostConfigs(clusterForHost, desiredConfigs));
+          r.setDesiredHostConfigs(h.getDesiredHostConfigs(clusterForHost, null));
           r.setMaintenanceState(h.getMaintenanceState(clusterForHost.getClusterId()));
         }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index 8f2d4e6..92edeb8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -398,6 +398,11 @@
                       "not defined. Repo version=%s, stackId=%s",
         osFamily, desiredRepoVersion, stackId));
     }
+
+    if (repoInfo.isEmpty()) {
+      LOG.error(String.format("Repository list is empty. Ambari may not be managing the repositories for %s", osFamily));
+    }
+
     // For every host at cluster, determine packages for all installed services
     List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();
@@ -508,7 +513,6 @@
             cluster.getClusterName(),
             cluster.getClusterId(),
             caption,
-            clusterHostInfoJson,
             "{}",
             StageUtils.getGson().toJson(hostLevelParams));
 
@@ -517,6 +521,7 @@
       stageId = 1L;
     }
     stage.setStageId(stageId);
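+    // the cluster host info is set on the request rather than passed to the stage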
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(Collections.singletonList(stage));
 
     try {
@@ -556,7 +561,6 @@
       cluster.getClusterName(),
       cluster.getClusterId(),
       caption,
-      clusterHostInfoJson,
       StageUtils.getGson().toJson(commandParams),
       StageUtils.getGson().toJson(hostLevelParams));
 
@@ -565,6 +569,7 @@
       stageId = 1L;
     }
     stage.setStageId(stageId);
+    req.setClusterHostInfo(clusterHostInfoJson);
     req.addStages(Collections.singletonList(stage));
 
     actionContext = new ActionExecutionContext(
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/JobResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/JobResourceProvider.java
index 2c5741f..906f33c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/JobResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/JobResourceProvider.java
@@ -177,7 +177,7 @@
   /**
    * Simple interface for fetching jobs from db.
    */
-  public static interface JobFetcher {
+  public interface JobFetcher {
     /**
      * Fetch job resources.
      * 
@@ -191,8 +191,8 @@
      *          the job id
      * @return a set of job resources
      */
-    public Set<Resource> fetchJobDetails(Set<String> requestedIds,
-        String clusterName, String workflowId, String jobId);
+    Set<Resource> fetchJobDetails(Set<String> requestedIds,
+                                  String clusterName, String workflowId, String jobId);
   }
 
   /**
@@ -315,7 +315,7 @@
   /**
    * Enumeration of db fields for the job table.
    */
-  static enum JobFields {
+  enum JobFields {
     JOBID,
     JOBNAME,
     STATUS,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java
index c0c1dc4..77a9bbd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java
@@ -28,12 +28,12 @@
    *
    * @param event  the event
    */
-  public void updateObservers(ResourceProviderEvent event);
+  void updateObservers(ResourceProviderEvent event);
 
   /**
    * Add an observer.
    *
    * @param observer  the observer
    */
-  public void addObserver(ResourceProviderObserver observer);
+  void addObserver(ResourceProviderObserver observer);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
index eb02637..368543c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
@@ -19,7 +19,6 @@
 
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -36,17 +35,12 @@
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.NoSuchBlueprintException;
 import org.apache.ambari.server.topology.SecurityConfiguration;
-import org.apache.ambari.server.topology.TopologyValidator;
-import org.apache.ambari.server.topology.validators.ClusterConfigTypeValidator;
-import org.apache.ambari.server.topology.validators.HiveServiceValidator;
-import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Enums;
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
 
 /**
  * Request for provisioning a cluster.
@@ -146,8 +140,6 @@
 
   private final String quickLinksProfileJson;
 
-  private final List<TopologyValidator> topologyValidators;
-
   private final static Logger LOG = LoggerFactory.getLogger(ProvisionClusterRequest.class);
 
   /**
@@ -197,9 +189,6 @@
     } catch (QuickLinksProfileEvaluationException ex) {
       throw new InvalidTopologyTemplateException("Invalid quick links profile", ex);
     }
-
-    topologyValidators = ImmutableList.of(new RequiredPasswordValidator(defaultPassword),
-      new ClusterConfigTypeValidator(), new HiveServiceValidator());
   }
 
   private String processQuickLinksProfile(Map<String, Object> properties) throws QuickLinksProfileEvaluationException {
@@ -273,11 +262,6 @@
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return topologyValidators;
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Provision Cluster '%s'", clusterName);
   }
@@ -480,4 +464,9 @@
   public String getQuickLinksProfileJson() {
     return quickLinksProfileJson;
   }
+
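+  /**
+   * @return the default password supplied with the cluster provision request
+   */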
+  public String getDefaultPassword() {
+    return defaultPassword;
+  }
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 1fc4bd5..f41eb26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -69,6 +69,7 @@
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.SecretReference;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
@@ -104,6 +105,7 @@
   public static final String REQUEST_SOURCE_SCHEDULE_HREF = REQUESTS + "/request_schedule/href";
   public static final String REQUEST_TYPE_ID = REQUESTS + "/type";
   public static final String REQUEST_INPUTS_ID = REQUESTS + "/inputs";
+  public static final String REQUEST_CLUSTER_HOST_INFO_ID = REQUESTS + "/cluster_host_info";
   public static final String REQUEST_RESOURCE_FILTER_ID = REQUESTS + "/resource_filters";
   public static final String REQUEST_OPERATION_LEVEL_ID = REQUESTS + "/operation_level";
   public static final String REQUEST_CREATE_TIME_ID = REQUESTS + "/create_time";
@@ -117,6 +119,8 @@
   public static final String REQUEST_COMPLETED_TASK_CNT_ID = REQUESTS + "/completed_task_count";
   public static final String REQUEST_QUEUED_TASK_CNT_ID = REQUESTS + "/queued_task_count";
   public static final String REQUEST_PROGRESS_PERCENT_ID = REQUESTS + "/progress_percent";
+  public static final String REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID = REQUESTS + "/remove_pending_host_requests";
+  public static final String REQUEST_PENDING_HOST_REQUEST_COUNT_ID = REQUESTS + "/pending_host_request_count";
   public static final String COMMAND_ID = "command";
   public static final String SERVICE_ID = "service_name";
   public static final String COMPONENT_ID = "component_name";
@@ -125,6 +129,7 @@
   public static final String ACTION_ID = "action";
   public static final String INPUTS_ID = "parameters";
   public static final String EXLUSIVE_ID = "exclusive";
+
   private static Set<String> pkPropertyIds =
     new HashSet<>(Arrays.asList(new String[]{
       REQUEST_ID_PROPERTY_ID}));
@@ -154,7 +159,11 @@
     REQUEST_TIMED_OUT_TASK_CNT_ID,
     REQUEST_COMPLETED_TASK_CNT_ID,
     REQUEST_QUEUED_TASK_CNT_ID,
-    REQUEST_PROGRESS_PERCENT_ID);
+    REQUEST_PROGRESS_PERCENT_ID,
+    REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID,
+    REQUEST_PENDING_HOST_REQUEST_COUNT_ID,
+    REQUEST_CLUSTER_HOST_INFO_ID
+  );
 
   // ----- Constructors ----------------------------------------------------
 
@@ -299,6 +308,7 @@
   public RequestStatus updateResources(Request requestInfo, Predicate predicate)
           throws SystemException, UnsupportedPropertyException,
           NoSuchResourceException, NoSuchParentResourceException {
+
     AmbariManagementController amc = getManagementController();
     final Set<RequestRequest> requests = new HashSet<>();
 
@@ -321,33 +331,48 @@
       }
       // There should be only one request with this id (or no request at all)
       org.apache.ambari.server.actionmanager.Request internalRequest = internalRequests.get(0);
-      // Validate update request (check constraints on state value and presence of abort reason)
-      if (updateRequest.getAbortReason() == null || updateRequest.getAbortReason().isEmpty()) {
-        throw new IllegalArgumentException("Abort reason can not be empty.");
-      }
 
-      if (updateRequest.getStatus() != HostRoleStatus.ABORTED) {
-        throw new IllegalArgumentException(
-                String.format("%s is wrong value. The only allowed value " +
-                                "for updating request status is ABORTED",
-                        updateRequest.getStatus()));
-      }
-
-      HostRoleStatus internalRequestStatus =
-          CalculatedStatus.statusFromStages(internalRequest.getStages()).getStatus();
-
-      if (internalRequestStatus.isCompletedState()) {
-        // Ignore updates to completed requests to avoid throwing exception on race condition
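+      // a remove_pending_host_requests update only applies to topology (logical)
+      // requests; all other updates go through the abort validation path below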
+      if (updateRequest.isRemovePendingHostRequests()) {
+        if (internalRequest instanceof LogicalRequest) {
+          targets.add(internalRequest);
+        } else {
+          throw new IllegalArgumentException("Request with id: " + internalRequest.getRequestId() + "is not a Logical Request.");
+        }
       } else {
-        // Validation passed
-        targets.add(internalRequest);
+        // Validate update request (check constraints on state value and presence of abort reason)
+        if (updateRequest.getAbortReason() == null || updateRequest.getAbortReason().isEmpty()) {
+          throw new IllegalArgumentException("Abort reason can not be empty.");
+        }
+
+        if (updateRequest.getStatus() != HostRoleStatus.ABORTED) {
+          throw new IllegalArgumentException(
+                  String.format("%s is wrong value. The only allowed value " +
+                                  "for updating request status is ABORTED",
+                          updateRequest.getStatus()));
+        }
+
+        HostRoleStatus internalRequestStatus =
+                CalculatedStatus.statusFromStages(internalRequest.getStages()).getStatus();
+
+        if (internalRequestStatus.isCompletedState()) {
+          // Ignore updates to completed requests to avoid throwing exception on race condition
+        } else {
+          // Validation passed
+          targets.add(internalRequest);
+        }
       }
+
     }
+
     // Perform update
     Iterator<RequestRequest> reqIterator = requests.iterator();
     for (org.apache.ambari.server.actionmanager.Request target : targets) {
-      String reason = reqIterator.next().getAbortReason();
-      amc.getActionManager().cancelRequest(target.getRequestId(), reason);
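+      // logical requests get their pending host requests removed; all other
+      // requests are cancelled through the action manager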
+      if (target instanceof LogicalRequest) {
+        topologyManager.removePendingHostRequests(target.getClusterName(), target.getRequestId());
+      } else {
+        String reason = reqIterator.next().getAbortReason();
+        amc.getActionManager().cancelRequest(target.getRequestId(), reason);
+      }
     }
     return getRequestStatus(null);
   }
@@ -365,9 +390,15 @@
       requestStatus = HostRoleStatus.valueOf(requestStatusStr);
     }
     String abortReason = (String) propertyMap.get(REQUEST_ABORT_REASON_PROPERTY_ID);
+    String removePendingHostRequests = (String) propertyMap.get(REQUEST_REMOVE_PENDING_HOST_REQUESTS_ID);
+
     RequestRequest requestRequest = new RequestRequest(clusterNameStr, requestId);
     requestRequest.setStatus(requestStatus);
     requestRequest.setAbortReason(abortReason);
+    if (removePendingHostRequests != null) {
+      requestRequest.setRemovePendingHostRequests(Boolean.valueOf(removePendingHostRequests));
+    }
+
     return requestRequest;
 
   }
@@ -716,7 +747,20 @@
     setResourceProperty(resource, REQUEST_ID_PROPERTY_ID, entity.getRequestId(), requestedPropertyIds);
     setResourceProperty(resource, REQUEST_CONTEXT_ID, entity.getRequestContext(), requestedPropertyIds);
     setResourceProperty(resource, REQUEST_TYPE_ID, entity.getRequestType(), requestedPropertyIds);
-    setResourceProperty(resource, REQUEST_INPUTS_ID, entity.getInputs(), requestedPropertyIds);
+
+    // Mask any sensitive data fields in the inputs data structure
+    if (isPropertyRequested(REQUEST_INPUTS_ID, requestedPropertyIds)) {
+      String value = entity.getInputs();
+      if (!StringUtils.isBlank(value)) {
+        value = SecretReference.maskPasswordInPropertyMap(value);
+      }
+      resource.setProperty(REQUEST_INPUTS_ID, value);
+    }
+
+    if (isPropertyRequested(REQUEST_CLUSTER_HOST_INFO_ID, requestedPropertyIds)) {
+      resource.setProperty(REQUEST_CLUSTER_HOST_INFO_ID, entity.getClusterHostInfo());
+    }
+
     setResourceProperty(resource, REQUEST_RESOURCE_FILTER_ID,
         org.apache.ambari.server.actionmanager.Request.filtersFromEntity(entity),
         requestedPropertyIds);
@@ -755,13 +799,21 @@
       // in this case, it appears that there are no tasks but this is a logical
       // topology request, so it's a matter of hosts simply not registering yet
       // for tasks to be created
-      status = CalculatedStatus.PENDING;
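+      // a logical request with no remaining pending host requests is effectively done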
+      if (logicalRequest.hasPendingHostRequests()) {
+        status = CalculatedStatus.PENDING;
+      } else {
+        status = CalculatedStatus.COMPLETED;
+      }
     } else {
       // there are either tasks or this is not a logical request, so do normal
       // status calculations
       status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
     }
 
+    if (null != logicalRequest) {
+      setResourceProperty(resource, REQUEST_PENDING_HOST_REQUEST_COUNT_ID, logicalRequest.getPendingHostRequestCount(), requestedPropertyIds);
+    }
+
     setResourceProperty(resource, REQUEST_STATUS_PROPERTY_ID, status.getStatus().toString(), requestedPropertyIds);
     setResourceProperty(resource, REQUEST_PROGRESS_PERCENT_ID, status.getPercent(), requestedPropertyIds);
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
index c37be91..3f67704 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStageContainer.java
@@ -65,6 +65,8 @@
 
   private ExecuteActionRequest actionRequest = null;
 
+  private String clusterHostInfo = null;
+
   /**
    * Logger
    */
@@ -99,6 +101,7 @@
     this.requestFactory = factory;
     this.actionManager = manager;
     this.actionRequest = actionRequest;
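+    // default to an empty JSON map until cluster host info is explicitly set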
+    this.clusterHostInfo = "{}";
   }
 
   /**
@@ -110,6 +113,10 @@
     return id;
   }
 
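+  /**
+   * Set the serialized cluster host info (JSON) to persist with the request.
+   *
+   * @param clusterHostInfo  the cluster host info JSON
+   */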
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo;
+  }
+
   /**
    * Add stages to request.
    *
@@ -202,8 +209,8 @@
   public void persist() throws AmbariException {
     if (!stages.isEmpty()) {
       Request request = (null == actionRequest)
-          ? requestFactory.createNewFromStages(stages)
-          : requestFactory.createNewFromStages(stages, actionRequest);
+          ? requestFactory.createNewFromStages(stages, clusterHostInfo)
+          : requestFactory.createNewFromStages(stages, clusterHostInfo, actionRequest);
 
       if (null != requestContext) {
         request.setRequestContext(requestContext);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java
index a5dd3df..d5dd199 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java
@@ -27,5 +27,5 @@
    *
    * @param event the event
    */
-  public void update(ResourceProviderEvent event);
+  void update(ResourceProviderEvent event);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
index 5e9091f..b2059ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
@@ -20,7 +20,6 @@
 package org.apache.ambari.server.controller.internal;
 
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -30,7 +29,6 @@
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
-import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,11 +88,6 @@
   }
 
   @Override
-  public List<TopologyValidator> getTopologyValidators() {
-    return Collections.emptyList();
-  }
-
-  @Override
   public String getDescription() {
     return String.format("Scale Cluster '%s' (+%s hosts)", clusterName, getTotalRequestedHostCount());
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index db9a0e2..77757c6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -91,7 +91,6 @@
   public static final String STAGE_REQUEST_ID = "Stage/request_id";
   public static final String STAGE_LOG_INFO = "Stage/log_info";
   public static final String STAGE_CONTEXT = "Stage/context";
-  public static final String STAGE_CLUSTER_HOST_INFO = "Stage/cluster_host_info";
   public static final String STAGE_COMMAND_PARAMS = "Stage/command_params";
   public static final String STAGE_HOST_PARAMS = "Stage/host_params";
   public static final String STAGE_SKIPPABLE = "Stage/skippable";
@@ -119,7 +118,6 @@
     PROPERTY_IDS.add(STAGE_REQUEST_ID);
     PROPERTY_IDS.add(STAGE_LOG_INFO);
     PROPERTY_IDS.add(STAGE_CONTEXT);
-    PROPERTY_IDS.add(STAGE_CLUSTER_HOST_INFO);
     PROPERTY_IDS.add(STAGE_COMMAND_PARAMS);
     PROPERTY_IDS.add(STAGE_HOST_PARAMS);
     PROPERTY_IDS.add(STAGE_SKIPPABLE);
@@ -211,8 +209,7 @@
 
     // !!! poor man's cache.  toResource() shouldn't be calling the db
     // every time, when the request id is likely the same for each stageEntity
-    Map<Long, Map<Long, HostRoleCommandStatusSummaryDTO>> cache =
-      new HashMap<>();
+    Map<Long, Map<Long, HostRoleCommandStatusSummaryDTO>> cache = new HashMap<>();
 
     List<StageEntity> entities = dao.findAll(request, predicate);
     for (StageEntity entity : entities) {
@@ -232,8 +229,11 @@
 
       if (null != lr) {
         Collection<StageEntity> topologyManagerStages = lr.getStageEntities();
+        // preload the summary map, since it contains summaries for all stages within this request
+        Map<Long, HostRoleCommandStatusSummaryDTO> summary = topologyManager.getStageSummaries(requestId);
+        cache.put(requestId, summary);
         for (StageEntity entity : topologyManagerStages) {
-          Resource stageResource = toResource(entity, propertyIds);
+          Resource stageResource = toResource(cache, entity, propertyIds);
           if (predicate.evaluate(stageResource)) {
             results.add(stageResource);
           }
@@ -242,7 +242,11 @@
     } else {
       Collection<StageEntity> topologyManagerStages = topologyManager.getStages();
       for (StageEntity entity : topologyManagerStages) {
-        Resource stageResource = toResource(entity, propertyIds);
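+        // populate the cache per request id so toResource() can reuse the summaries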
+        if (!cache.containsKey(entity.getRequestId())) {
+          Map<Long, HostRoleCommandStatusSummaryDTO> summary = topologyManager.getStageSummaries(entity.getRequestId());
+          cache.put(entity.getRequestId(), summary);
+        }
+        Resource stageResource = toResource(cache, entity, propertyIds);
         if (predicate.evaluate(stageResource)) {
           results.add(stageResource);
         }
@@ -301,12 +305,6 @@
     setResourceProperty(resource, STAGE_REQUEST_ID, entity.getRequestId(), requestedIds);
     setResourceProperty(resource, STAGE_CONTEXT, entity.getRequestContext(), requestedIds);
 
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_CLUSTER_HOST_INFO, requestedIds)) {
-      resource.setProperty(STAGE_CLUSTER_HOST_INFO, entity.getClusterHostInfo());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
     if (isPropertyRequested(STAGE_COMMAND_PARAMS, requestedIds)) {
       String value = entity.getCommandParamsStage();
       if (!StringUtils.isBlank(value)) {
@@ -351,71 +349,4 @@
     return resource;
   }
 
-  /**
-   * Converts the {@link StageEntity} to a {@link Resource}.
-   *
-   * @param entity        the entity to convert (not {@code null})
-   * @param requestedIds  the properties requested (not {@code null})
-   *
-   * @return the new resource
-   */
-  //todo: almost exactly the same as other toResource except how summaries are obtained
-  //todo: refactor to combine the two with the summary logic extracted
-  private Resource toResource(StageEntity entity, Set<String> requestedIds) {
-
-    Resource resource = new ResourceImpl(Resource.Type.Stage);
-
-    Long clusterId = entity.getClusterId();
-    if (clusterId != null && !clusterId.equals(Long.valueOf(-1L))) {
-      try {
-        Cluster cluster = clustersProvider.get().getClusterById(clusterId);
-
-        setResourceProperty(resource, STAGE_CLUSTER_NAME, cluster.getClusterName(), requestedIds);
-      } catch (Exception e) {
-        LOG.error("Can not get information for cluster " + clusterId + ".", e );
-      }
-    }
-
-    Map<Long, HostRoleCommandStatusSummaryDTO> summary =
-        topologyManager.getStageSummaries(entity.getRequestId());
-
-    setResourceProperty(resource, STAGE_STAGE_ID, entity.getStageId(), requestedIds);
-    setResourceProperty(resource, STAGE_REQUEST_ID, entity.getRequestId(), requestedIds);
-    setResourceProperty(resource, STAGE_CONTEXT, entity.getRequestContext(), requestedIds);
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_CLUSTER_HOST_INFO, requestedIds)) {
-      resource.setProperty(STAGE_CLUSTER_HOST_INFO, entity.getClusterHostInfo());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_COMMAND_PARAMS, requestedIds)) {
-      resource.setProperty(STAGE_COMMAND_PARAMS, entity.getCommandParamsStage());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_HOST_PARAMS, requestedIds)) {
-      resource.setProperty(STAGE_HOST_PARAMS, entity.getHostParamsStage());
-    }
-
-    setResourceProperty(resource, STAGE_SKIPPABLE, entity.isSkippable(), requestedIds);
-
-    Long startTime = Long.MAX_VALUE;
-    Long endTime = 0L;
-    if (summary.containsKey(entity.getStageId())) {
-      startTime = summary.get(entity.getStageId()).getStartTime();
-      endTime = summary.get(entity.getStageId()).getEndTime();
-    }
-
-    setResourceProperty(resource, STAGE_START_TIME, startTime, requestedIds);
-    setResourceProperty(resource, STAGE_END_TIME, endTime, requestedIds);
-
-    CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, Collections.singleton(entity.getStageId()));
-
-    setResourceProperty(resource, STAGE_PROGRESS_PERCENT, status.getPercent(), requestedIds);
-    setResourceProperty(resource, STAGE_STATUS, status.getStatus(), requestedIds);
-    setResourceProperty(resource, STAGE_DISPLAY_STATUS, status.getDisplayStatus(), requestedIds);
-
-    return resource;
-  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskAttemptResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskAttemptResourceProvider.java
index 32b7fec..5528480 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskAttemptResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskAttemptResourceProvider.java
@@ -182,7 +182,7 @@
   /**
    * Simple interface for fetching task attempts from db.
    */
-  public static interface TaskAttemptFetcher {
+  public interface TaskAttemptFetcher {
     /**
      * Fetch task attempt resources
      * 
@@ -198,9 +198,9 @@
      *          the task attempt id
      * @return a set of task attempt resources
      */
-    public Set<Resource> fetchTaskAttemptDetails(Set<String> requestedIds,
-        String clusterName, String workflowId, String jobId,
-        String taskAttemptId);
+    Set<Resource> fetchTaskAttemptDetails(Set<String> requestedIds,
+                                          String clusterName, String workflowId, String jobId,
+                                          String taskAttemptId);
   }
 
   /**
@@ -318,7 +318,7 @@
   /**
    * Enumeration of db fields for the task attempt table.
    */
-  static enum TaskAttemptFields {
+  enum TaskAttemptFields {
     JOBID,
     TASKATTEMPTID,
     TASKTYPE,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java
index 20b0417..6721f7a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java
@@ -75,6 +75,7 @@
   public static final String TASK_ATTEMPT_CNT_PROPERTY_ID  = PropertyHelper.getPropertyId("Tasks", "attempt_cnt");
   public static final String TASK_COMMAND_DET_PROPERTY_ID  = PropertyHelper.getPropertyId("Tasks", "command_detail");
   public static final String TASK_CUST_CMD_NAME_PROPERTY_ID  = PropertyHelper.getPropertyId("Tasks", "custom_command_name");
+  public static final String TASK_COMMAND_OPS_DISPLAY_NAME  = PropertyHelper.getPropertyId("Tasks", "ops_display_name");
 
   private static Set<String> pkPropertyIds =
     new HashSet<>(Arrays.asList(new String[]{
@@ -107,6 +108,7 @@
     PROPERTY_IDS.add(TASK_ATTEMPT_CNT_PROPERTY_ID);
     PROPERTY_IDS.add(TASK_COMMAND_DET_PROPERTY_ID);
     PROPERTY_IDS.add(TASK_CUST_CMD_NAME_PROPERTY_ID);
+    PROPERTY_IDS.add(TASK_COMMAND_OPS_DISPLAY_NAME);
   }
 
   /**
@@ -230,6 +232,7 @@
         setResourceProperty(resource, TASK_COMMAND_DET_PROPERTY_ID, hostRoleCommand.getCommandDetail(), requestedIds);
       }
 
+      setResourceProperty(resource, TASK_COMMAND_OPS_DISPLAY_NAME, hostRoleCommand.getOpsDisplayName(), requestedIds);
       results.add(resource);
     }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 709ca93..0ebf3aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -91,6 +91,7 @@
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceInfo;
@@ -100,6 +101,7 @@
 import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.UpgradeHelper;
 import org.apache.ambari.server.state.UpgradeHelper.UpgradeGroupHolder;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -219,6 +221,10 @@
       Arrays.asList(UPGRADE_REQUEST_ID, UPGRADE_CLUSTER_NAME));
   private static final Set<String> PROPERTY_IDS = new HashSet<>();
 
+  /**
+   * The command parameter key for the comma-separated list of supported services.
+   */
+  public static final String COMMAND_PARAM_SUPPORTED_SERVICES = "supported_services";
 
   private static final String DEFAULT_REASON_TEMPLATE = "Aborting upgrade %s";
 
@@ -608,8 +614,7 @@
   /**
    * Validates a singular API request.
    *
-   * @param requestMap
-   *          the map of properties
+   * @param upgradeContext the upgrade context for the request
    * @return the validated upgrade pack
    * @throws AmbariException
    */
@@ -714,6 +719,36 @@
     Set<String> supportedServices = new HashSet<>();
     UpgradeScope scope = UpgradeScope.COMPLETE;
 
+    switch (direction) {
+      case UPGRADE:
+        StackId sourceStackId = cluster.getCurrentStackVersion();
+
+        RepositoryVersionEntity targetRepositoryVersion = s_repoVersionDAO.findByStackNameAndVersion(
+            sourceStackId.getStackName(), version);
+
+        // !!! Consult the version definition and add the service names to supportedServices
+        if (targetRepositoryVersion.getType() != RepositoryType.STANDARD) {
+          try {
+            VersionDefinitionXml vdf = targetRepositoryVersion.getRepositoryXml();
+            supportedServices.addAll(vdf.getAvailableServiceNames());
+
+            // !!! the service list should never be empty for a non-STANDARD repo, but guard just in case
+            if (!supportedServices.isEmpty()) {
+              scope = UpgradeScope.PARTIAL;
+            }
+
+          } catch (Exception e) {
+            String msg = String.format("Could not parse version definition for %s. Upgrade will not proceed.", version);
+            LOG.error(msg, e);
+            throw new AmbariException(msg);
+          }
+        }
+
+        break;
+      case DOWNGRADE:
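+        // no service filtering for downgrades; the scope remains COMPLETE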
+        break;
+    }
+
     upgradeContext.setResolver(resolver);
     upgradeContext.setSupportedServices(supportedServices);
     upgradeContext.setScope(scope);
@@ -817,11 +852,14 @@
     // from upgrade pack
     @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
     Set<Service> services = new HashSet<>(cluster.getServices().values());
+
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
     Map<Service, Set<ServiceComponent>> targetComponents = new HashMap<>();
     for (Service service: services) {
-      Set<ServiceComponent> serviceComponents =
-        new HashSet<>(service.getServiceComponents().values());
-      targetComponents.put(service, serviceComponents);
+      if (upgradeContext.isServiceSupported(service.getName())) {
+        Set<ServiceComponent> serviceComponents = new HashSet<>(service.getServiceComponents().values());
+        targetComponents.put(service, serviceComponents);
+      }
     }
 
     // !!! determine which stack to check for component isAdvertised
@@ -1314,6 +1352,7 @@
       String serviceName = wrapper.getTasks().get(0).getService();
       ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
           stackId.getStackVersion(), serviceName);
+
       params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
     }
@@ -1324,7 +1363,7 @@
     // hosts in maintenance mode are excluded from the upgrade
     actionContext.setMaintenanceModeHostExcluded(true);
 
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
 
@@ -1332,8 +1371,7 @@
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1404,7 +1442,7 @@
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
 
@@ -1415,8 +1453,7 @@
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1440,6 +1477,7 @@
     }
 
     s_commandExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, requestParams);
+
     request.addStages(Collections.singletonList(stage));
   }
 
@@ -1464,7 +1502,7 @@
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
-    actionContext.setTimeout(Short.valueOf(s_configuration.getDefaultAgentTaskTimeout(false)));
+    actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
 
@@ -1476,8 +1514,7 @@
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), entity.getText(),
-        jsons.getClusterHostInfo(), jsons.getCommandParamsForStage(),
+        cluster.getClusterName(), cluster.getClusterId(), entity.getText(), jsons.getCommandParamsForStage(),
         jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
@@ -1520,6 +1557,7 @@
 
     Map<String, String> commandParams = getNewParameterMap(request, context);
     commandParams.put(UpgradeContext.COMMAND_PARAM_UPGRADE_PACK, upgradePack.getName());
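+    // pass the supported services so the command knows the scope of a partial upgrade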
+    commandParams.put(COMMAND_PARAM_SUPPORTED_SERVICES, StringUtils.join(context.getSupportedServices(), ','));
 
     // Notice that this does not apply any params because the input does not specify a stage.
     // All of the other actions do use additional params.
@@ -1607,8 +1645,8 @@
         cluster, context.getEffectiveStackId());
 
     Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
-        cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getClusterHostInfo(),
-        jsons.getCommandParamsForStage(), jsons.getHostParamsForStage());
+        cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getCommandParamsForStage(),
+      jsons.getHostParamsForStage());
 
     stage.setSkippable(skippable);
     stage.setAutoSkipFailureSupported(supportsAutoSkipOnFailure);
@@ -1648,7 +1686,7 @@
    * command was created. For upgrades, this is problematic since the commands
    * are all created ahead of time, but the upgrade may change configs as part
    * of the upgrade pack.</li>
-   * <li>{@link #COMMAND_PARAM_REQUEST_ID}</li> the ID of the request.
+   * <li>{@link UpgradeContext#COMMAND_PARAM_REQUEST_ID}</li> the ID of the request.
    * <ul>
    *
    * @return the initialized parameter map.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
index 352aa2d..5f12e52 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java
@@ -102,6 +102,7 @@
   protected static final String VERSION_DEF_AVAILABLE_SERVICES       = "VersionDefinition/services";
   protected static final String VERSION_DEF_STACK_SERVICES           = "VersionDefinition/stack_services";
   protected static final String VERSION_DEF_STACK_DEFAULT            = "VersionDefinition/stack_default";
+  protected static final String VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS = "VersionDefinition/stack_repo_update_link_exists";
   protected static final String VERSION_DEF_DISPLAY_NAME             = "VersionDefinition/display_name";
   protected static final String VERSION_DEF_VALIDATION               = "VersionDefinition/validation";
   protected static final String SHOW_AVAILABLE                       = "VersionDefinition/show_available";
@@ -157,6 +158,7 @@
       VERSION_DEF_AVAILABLE_SERVICES,
       VERSION_DEF_STACK_SERVICES,
       VERSION_DEF_STACK_DEFAULT,
+      VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS,
       VERSION_DEF_DISPLAY_NAME,
       VERSION_DEF_VALIDATION,
       VERSION_DEF_MIN_JDK,
@@ -587,7 +589,6 @@
    * @param id            the definition id
    * @param xml           the version definition xml
    * @param requestedIds  the requested ids
-   * @param fromAvailable if the resource should include the {@link #SHOW_AVAILABLE} property
    * @return the resource
    * @throws SystemException
    */
@@ -617,6 +618,7 @@
     setResourceProperty(resource, VERSION_DEF_RELEASE_NOTES, xml.release.releaseNotes, requestedIds);
     setResourceProperty(resource, VERSION_DEF_RELEASE_VERSION, xml.release.version, requestedIds);
     setResourceProperty(resource, VERSION_DEF_STACK_DEFAULT, xml.isStackDefault(), requestedIds);
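+    // a non-null "latest" URI in the stack's repoinfo.xml means a repo update link exists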
+    setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     setResourceProperty(resource, VERSION_DEF_DISPLAY_NAME, xml.release.display, requestedIds);
 
     if (null != validations) {
@@ -686,6 +688,7 @@
       setResourceProperty(resource, VERSION_DEF_STACK_SERVICES, xml.getStackServices(stack), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MIN_JDK, stack.getMinJdk(), requestedIds);
       setResourceProperty(resource, VERSION_DEF_MAX_JDK, stack.getMaxJdk(), requestedIds);
+      setResourceProperty(resource, VERSION_DEF_STACK_REPO_UPDATE_LINK_EXISTS, (stack.getRepositoryXml().getLatestURI() != null), requestedIds);
     }
 
     return resource;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetLayoutResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetLayoutResourceProvider.java
index a40f165..77b9c4a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetLayoutResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetLayoutResourceProvider.java
@@ -70,7 +70,7 @@
   public static final String WIDGETLAYOUT_WIDGETS_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetLayoutInfo", "widgets");
   public static final String WIDGETLAYOUT_USERNAME_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetLayoutInfo", "user_name");
   public static final String WIDGETLAYOUT_DISPLAY_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetLayoutInfo", "display_name");
-  public static enum SCOPE {
+  public enum SCOPE {
     CLUSTER,
     USER
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
index a3d3ab5..7ab5588 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
@@ -74,7 +74,7 @@
   public static final String WIDGET_METRICS_PROPERTY_ID                 = PropertyHelper.getPropertyId("WidgetInfo", "metrics");
   public static final String WIDGET_VALUES_PROPERTY_ID                 = PropertyHelper.getPropertyId("WidgetInfo", "values");
   public static final String WIDGET_PROPERTIES_PROPERTY_ID                 = PropertyHelper.getPropertyId("WidgetInfo", "properties");
-  public static enum SCOPE {
+  public enum SCOPE {
     CLUSTER,
     USER
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WorkflowResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WorkflowResourceProvider.java
index 9737aaa..4d082ddb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WorkflowResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WorkflowResourceProvider.java
@@ -173,7 +173,7 @@
   /**
    * Simple interface for fetching workflows from db.
    */
-  public static interface WorkflowFetcher {
+  public interface WorkflowFetcher {
     /**
      * Fetch workflow resources.
      * 
@@ -185,8 +185,8 @@
      *          the workflow id
      * @return a set of workflow resources
      */
-    public Set<Resource> fetchWorkflows(Set<String> requestedIds,
-        String clusterName, String workflowId);
+    Set<Resource> fetchWorkflows(Set<String> requestedIds,
+                                 String clusterName, String workflowId);
   }
 
   /**
@@ -292,7 +292,7 @@
   /**
    * Enumeration of db fields for the workflow table.
    */
-  static enum WorkflowFields {
+  enum WorkflowFields {
     WORKFLOWID,
     WORKFLOWNAME,
     USERNAME,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ivory/IvoryService.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ivory/IvoryService.java
index aeeed0a..e089041 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ivory/IvoryService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ivory/IvoryService.java
@@ -31,7 +31,7 @@
    *
    * @param feed  the feed
    */
-  public void submitFeed(Feed feed);
+  void submitFeed(Feed feed);
 
   /**
    * Get a feed for the given name.
@@ -40,49 +40,49 @@
    *
    * @return a feed that matches the given name; null if none is found
    */
-  public Feed getFeed(String feedName);
+  Feed getFeed(String feedName);
 
   /**
    * Get all the known feed names.
    *
    * @return a list of feed names; may not be null
    */
-  public List<String> getFeedNames();
+  List<String> getFeedNames();
 
   /**
    * Update a feed based on the given {@link Feed} object.
    *
    * @param feed the feed object
    */
-  public void updateFeed(Feed feed);
+  void updateFeed(Feed feed);
 
   /**
    * Suspend the feed with the given feed name.
    *
    * @param feedName  the feed name
    */
-  public void suspendFeed(String feedName);
+  void suspendFeed(String feedName);
 
   /**
    * Resume the feed with the given feed name.
    *
    * @param feedName  the feed name
    */
-  public void resumeFeed(String feedName);
+  void resumeFeed(String feedName);
 
   /**
    * Schedule the feed with the given feed name.
    *
    * @param feedName  the feed name
    */
-  public void scheduleFeed(String feedName);
+  void scheduleFeed(String feedName);
 
   /**
    * Delete the feed with the given feed name.
    *
    * @param feedName the feed name
    */
-  public void deleteFeed(String feedName);
+  void deleteFeed(String feedName);
 
 
   // ----- Cluster operations ------------------------------------------------
@@ -92,7 +92,7 @@
    *
    * @param cluster  the cluster
    */
-  public void submitCluster(Cluster cluster);
+  void submitCluster(Cluster cluster);
 
   /**
    * Get a cluster for the given name.
@@ -101,28 +101,28 @@
    *
    * @return a cluster that matches the given name; null if none is found
    */
-  public Cluster getCluster(String clusterName);
+  Cluster getCluster(String clusterName);
 
   /**
    * Get all the known cluster names.
    *
    * @return a list of cluster names; may not be null
    */
-  public List<String> getClusterNames();
+  List<String> getClusterNames();
 
   /**
    * Update a cluster based on the given {@link Cluster} object.
    *
    * @param cluster  the cluster
    */
-  public void updateCluster(Cluster cluster);
+  void updateCluster(Cluster cluster);
 
   /**
    * Delete the cluster with the given name.
    *
    * @param clusterName  the cluster name
    */
-  public void deleteCluster(String clusterName);
+  void deleteCluster(String clusterName);
 
 
   // ----- Instance operations -----------------------------------------------
@@ -134,7 +134,7 @@
    *
    * @return the list of instances for the given feed
    */
-  public List<Instance> getInstances(String feedName); //read
+  List<Instance> getInstances(String feedName); //read
 
   /**
    * Suspend the instance for the given feed name and id.
@@ -142,7 +142,7 @@
    * @param feedName    the feed name
    * @param instanceId  the id
    */
-  public void suspendInstance(String feedName, String instanceId);
+  void suspendInstance(String feedName, String instanceId);
 
   /**
    * Resume the instance for the given feed name and id.
@@ -150,7 +150,7 @@
    * @param feedName    the feed name
    * @param instanceId  the id
    */
-  public void resumeInstance(String feedName, String instanceId);
+  void resumeInstance(String feedName, String instanceId);
 
   /**
    * Kill the instance for the given feed name and id.
@@ -158,5 +158,5 @@
    * @param feedName    the feed name
    * @param instanceId  the id
    */
-  public void killInstance(String feedName, String instanceId);
+  void killInstance(String feedName, String instanceId);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java
index c8444b1..c65a4b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java
@@ -32,5 +32,5 @@
    *
    * @throws SQLException thrown if the connection cannot be obtained
    */
-  public Connection getConnection() throws SQLException;
+  Connection getConnection() throws SQLException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
index bffe600..566d882 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
@@ -35,7 +35,7 @@
    * @return set of JMX host names
    *
    */
-  public Set<String> getHostNames(String clusterName, String componentName);
+  Set<String> getHostNames(String clusterName, String componentName);
 
   /**
    * Get the port for the specified cluster name and component.
@@ -48,7 +48,7 @@
    *
    * @throws SystemException if unable to get the JMX port
    */
-  public String getPort(String clusterName, String componentName, String hostName)
+  String getPort(String clusterName, String componentName, String hostName)
       throws SystemException;
 
   /**
@@ -63,7 +63,7 @@
    *
    * @throws SystemException if unable to get the JMX port
    */
-  public String getPort(String clusterName, String componentName, String hostName,  boolean httpsEnabled)
+  String getPort(String clusterName, String componentName, String hostName, boolean httpsEnabled)
       throws SystemException;
   
   /**
@@ -75,7 +75,7 @@
    * @return the JMX protocol for the specified cluster name and component, one of http or https
    *
    */
-  public String getJMXProtocol(String clusterName, String componentName) ;
+  String getJMXProtocol(String clusterName, String componentName);
   
   /**
    * Get the rpc tag for the specified cluster name, component and port number
@@ -87,6 +87,6 @@
    * @return the RPC tag for the specified cluster name, component and port number(client/healthcheck/etc.).
    *
    */
-  public String getJMXRpcMetricTag(String clusterName, String componentName, String port);
+  String getJMXRpcMetricTag(String clusterName, String componentName, String port);
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
index 6b484a4..487182e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
@@ -17,10 +17,12 @@
  */
 package org.apache.ambari.server.controller.logging;
 
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.ambari.server.AmbariService;
 import org.apache.ambari.server.configuration.Configuration;
@@ -30,9 +32,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.AbstractService;
 import com.google.inject.Inject;
@@ -69,6 +71,13 @@
   private static Logger LOG = LoggerFactory.getLogger(LogSearchDataRetrievalService.class);
 
   /**
+   * Maximum number of failed metadata requests that the LogSearch integration will
+   *   tolerate for a given component before treating the component as failed and
+   *   skipping further requests.
+   */
+  private static final int MAX_RETRIES_FOR_FAILED_METADATA_REQUEST = 10;
+
+  /**
    * Factory instance used to handle URL string generation requests on the
    *   main request thread.
    */
@@ -109,6 +118,19 @@
    */
   private final Set<String> currentRequests = Sets.newConcurrentHashSet();
 
+  /**
+   * A map that maintains failure counts for logging metadata requests on a
+   * per-component basis.  This map should be consulted prior to making a
+   * metadata request to the LogSearch service.  If LogSearch has already
+   * returned an empty list for the given component, or any other error has
+   * occurred on a certain number of attempts, no further requests should be
+   * made for that component.
+   */
+  private final Map<String, AtomicInteger> componentRequestFailureCounts =
+    Maps.newConcurrentMap();
+
 
   /**
    * Executor instance to be used to run REST queries against
@@ -172,18 +194,20 @@
       LOG.debug("LogFileNames result for key = {} found in cache", key);
       return cacheResult;
     } else {
-      // queue up a thread to create the LogSearch REST request to obtain this information
-      if (currentRequests.contains(key)) {
-        LOG.debug("LogFileNames request has been made for key = {}, but not completed yet", key);
+      if (!componentRequestFailureCounts.containsKey(component) || componentRequestFailureCounts.get(component).get() < MAX_RETRIES_FOR_FAILED_METADATA_REQUEST) {
+        // queue up a thread to create the LogSearch REST request to obtain this information
+        if (currentRequests.contains(key)) {
+          LOG.debug("LogFileNames request has been made for key = {}, but not completed yet", key);
+        } else {
+          LOG.debug("LogFileNames result for key = {} not in cache, queueing up remote request", key);
+          // add request key to queue, to keep multiple copies of the same request from
+          // being submitted
+          currentRequests.add(key);
+          startLogSearchFileNameRequest(host, component, cluster);
+        }
       } else {
-        LOG.debug("LogFileNames result for key = {} not in cache, queueing up remote request", key);
-        // add request key to queue, to keep multiple copies of the same request from
-        // being submitted
-        currentRequests.add(key);
-        startLogSearchFileNameRequest(host, component, cluster);
+        LOG.debug("Too many failures occurred while attempting to obtain log file metadata for component = {}, Ambari will ignore this component for LogSearch Integration", component);
       }
-
-
     }
 
     return null;
@@ -260,6 +284,15 @@
     return currentRequests;
   }
 
+  /**
+   * This protected method allows for simpler unit tests.
+   *
+   * @return the Map of failure counts on a per-component basis
+   */
+  protected Map<String, AtomicInteger> getComponentRequestFailureCounts() {
+    return componentRequestFailureCounts;
+  }
+
   private void startLogSearchFileNameRequest(String host, String component, String cluster) {
     // Create a separate instance of LoggingRequestHelperFactory for
     // each task launched, since these tasks will occur on a separate thread
@@ -268,7 +301,7 @@
     // TODO: the LoggingRequestHelperFactory implementation thread-safe, so that
     // TODO: a single factory instance can be shared across multiple threads safely
     executor.execute(new LogSearchFileNameRequestRunnable(host, component, cluster, logFileNameCache, currentRequests,
-                                                          injector.getInstance(LoggingRequestHelperFactory.class)));
+                                                          injector.getInstance(LoggingRequestHelperFactory.class), componentRequestFailureCounts));
   }
 
   private AmbariManagementController getController() {
@@ -304,20 +337,24 @@
 
     private LoggingRequestHelperFactory loggingRequestHelperFactory;
 
+    private final Map<String, AtomicInteger> componentRequestFailureCounts;
+
     private AmbariManagementController controller;
 
-    LogSearchFileNameRequestRunnable(String host, String component, String cluster, Cache<String, Set<String>> logFileNameCache, Set<String> currentRequests, LoggingRequestHelperFactory loggingRequestHelperFactory) {
-      this(host, component, cluster, logFileNameCache, currentRequests, loggingRequestHelperFactory, AmbariServer.getController());
+    LogSearchFileNameRequestRunnable(String host, String component, String cluster, Cache<String, Set<String>> logFileNameCache, Set<String> currentRequests, LoggingRequestHelperFactory loggingRequestHelperFactory,
+                                     Map<String, AtomicInteger> componentRequestFailureCounts) {
+      this(host, component, cluster, logFileNameCache, currentRequests, loggingRequestHelperFactory, componentRequestFailureCounts, AmbariServer.getController());
     }
 
     LogSearchFileNameRequestRunnable(String host, String component, String cluster, Cache<String, Set<String>> logFileNameCache, Set<String> currentRequests,
-                                               LoggingRequestHelperFactory loggingRequestHelperFactory, AmbariManagementController controller) {
+                                               LoggingRequestHelperFactory loggingRequestHelperFactory, Map<String, AtomicInteger> componentRequestFailureCounts, AmbariManagementController controller) {
       this.host  = host;
       this.component = component;
       this.cluster = cluster;
       this.logFileNameCache = logFileNameCache;
       this.currentRequests = currentRequests;
       this.loggingRequestHelperFactory = loggingRequestHelperFactory;
+      this.componentRequestFailureCounts = componentRequestFailureCounts;
       this.controller = controller;
     }
 
@@ -340,7 +377,13 @@
             // update cache with returned result
             logFileNameCache.put(key, logFileNamesResult);
           } else {
-            LOG.debug("LogSearchFileNameRequestRunnable: remote request was not successful");
+            LOG.debug("LogSearchFileNameRequestRunnable: remote request was not successful for component = {} on host ={}", component, host);
+            // initialize the failure count atomically; a containsKey/put pair
+            // could race between concurrent request threads
+            componentRequestFailureCounts.putIfAbsent(component, new AtomicInteger());
+
+            // increment the failure count for this component
+            componentRequestFailureCounts.get(component).incrementAndGet();
           }
         } else {
           LOG.debug("LogSearchFileNameRequestRunnable: request helper was null.  This may mean that LogSearch is not available, or could be a potential connection problem.");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelper.java
index 7833e01..aa98715 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelper.java
@@ -29,7 +29,7 @@
    *
    * @return a LogQueryResponse, containing the results of the search
    */
-  public LogQueryResponse sendQueryRequest(Map<String, String> queryParameters);
+  LogQueryResponse sendQueryRequest(Map<String, String> queryParameters);
 
   /**
    * Sends a request to obtain the log file name for a given component, on
@@ -41,7 +41,7 @@
    * @return a Set of Strings that include the log file names associated
    *         with this component/host combination
    */
-  public Set<String> sendGetLogFileNamesRequest(String componentName, String hostName);
+  Set<String> sendGetLogFileNamesRequest(String componentName, String hostName);
 
   /**
    * Sends a request to obtain the log level counts for a given component on
@@ -53,7 +53,7 @@
    * @return a LogLevelQueryResponse, containing the log level counts for this
    *         component/host combination
    */
-  public LogLevelQueryResponse sendLogLevelQueryRequest(String componentName, String hostName);
+  LogLevelQueryResponse sendLogLevelQueryRequest(String componentName, String hostName);
 
   /**
    * Appends the required LogSearch query parameters to a base URI
@@ -68,6 +68,6 @@
    *         the log file associated with this component/host
    *         combination
    */
-  public String createLogFileTailURI(String baseURI, String componentName, String hostName);
+  String createLogFileTailURI(String baseURI, String componentName, String hostName);
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
index 2b1b04a..0db8853 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImpl.java
@@ -100,6 +100,8 @@
 
   private static final int DEFAULT_LOGSEARCH_READ_TIMEOUT_IN_MILLISECONDS = 5000;
 
+  private static final String LOGSEARCH_CLUSTERS_QUERY_PARAMETER_NAME = "clusters";
+
   private static AtomicInteger errorLogCounterForLogSearchConnectionExceptions = new AtomicInteger(0);
 
   private final String hostName;
@@ -383,6 +385,10 @@
     URIBuilder uriBuilder = createBasicURI(scheme);
     uriBuilder.setPath(LOGSEARCH_QUERY_PATH);
 
+    // set the current cluster name, in case this LogSearch service supports data
+    // for multiple clusters
+    uriBuilder.addParameter(LOGSEARCH_CLUSTERS_QUERY_PARAMETER_NAME, cluster.getClusterName());
+
     // add any query strings specified
     for (String key : queryParameters.keySet()) {
       uriBuilder.addParameter(key, queryParameters.get(key));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
index 8f1fc8e..9cfd1b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
@@ -31,7 +31,7 @@
    *
    * @throws org.apache.ambari.server.controller.spi.SystemException if unable to get the metrics server host name
    */
-  public String getCollectorHostName(String clusterName, MetricsService service) throws SystemException;
+  String getCollectorHostName(String clusterName, MetricsService service) throws SystemException;
 
   /**
    * Get the host name for the given cluster name and component name.
@@ -42,7 +42,7 @@
    * @throws org.apache.ambari.server.controller.spi.SystemException
    *          if unable to get the JMX host name
    */
-  public String getHostName(String clusterName, String componentName) throws SystemException;
+  String getHostName(String clusterName, String componentName) throws SystemException;
 
   /**
    * Get the metrics server port for the given cluster name.
@@ -53,7 +53,7 @@
    *
    * @throws org.apache.ambari.server.controller.spi.SystemException if unable to get the metrics server port
    */
-  public String getCollectorPort(String clusterName, MetricsService service) throws SystemException;
+  String getCollectorPort(String clusterName, MetricsService service) throws SystemException;
 
   /**
    * Get the status of metrics server host for the given cluster name.
@@ -64,7 +64,7 @@
    *
    * @throws SystemException if unable to get the status of metrics server host
    */
-  public boolean isCollectorHostLive(String clusterName, MetricsService service) throws SystemException;
+  boolean isCollectorHostLive(String clusterName, MetricsService service) throws SystemException;
 
   /**
    * Get the status of metrics server component for the given cluster name.
@@ -75,5 +75,5 @@
    *
    * @throws SystemException if unable to get the status of metrics server component
    */
-  public boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException;
+  boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
index 2fb2f18..cceaafb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsPaddingMethod.java
@@ -29,7 +29,7 @@
   private final PADDING_STRATEGY strategy;
   public static final String ZERO_PADDING_PARAM = "params/padding";
   private static final long MINIMUM_STEP_INTERVAL = 999l; // ~ 1 second
-  public static enum PADDING_STRATEGY {
+  public enum PADDING_STRATEGY {
     ZEROS,
     NULLS,
     NONE
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsServiceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsServiceProvider.java
index f9f9dc6..c207de7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsServiceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsServiceProvider.java
@@ -22,7 +22,7 @@
   /**
    * Enumeration to distinguish metrics service installed for a cluster
    */
-  public enum MetricsService {
+  enum MetricsService {
     GANGLIA,
     TIMELINE_METRICS
   }
@@ -31,5 +31,5 @@
    * Provide type of metrics service installed.
    * @return @MetricsService
    */
-  public MetricsService getMetricsServiceType();
+  MetricsService getMetricsServiceType();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java
index 9b3fbfe..6b14035 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java
@@ -27,5 +27,5 @@
  * associated property ids.
  */
 public interface BasePredicate extends Predicate, PredicateVisitorAcceptor {
-  public Set<String> getPropertyIds();
+  Set<String> getPropertyIds();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java
index 3316a00..061d79d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java
@@ -22,13 +22,13 @@
  */
 public interface PredicateVisitor {
 
-  public void acceptComparisonPredicate(ComparisonPredicate predicate);
+  void acceptComparisonPredicate(ComparisonPredicate predicate);
 
-  public void acceptArrayPredicate(ArrayPredicate predicate);
+  void acceptArrayPredicate(ArrayPredicate predicate);
 
-  public void acceptUnaryPredicate(UnaryPredicate predicate);
+  void acceptUnaryPredicate(UnaryPredicate predicate);
 
-  public void acceptAlwaysPredicate(AlwaysPredicate predicate);
+  void acceptAlwaysPredicate(AlwaysPredicate predicate);
 
-  public void acceptCategoryPredicate(CategoryPredicate predicate);
+  void acceptCategoryPredicate(CategoryPredicate predicate);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java
index 6af2cb8..0e90b27 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java
@@ -27,5 +27,5 @@
    *
    * @param visitor  the visitor
    */
-  public void accept(PredicateVisitor visitor);
+  void accept(PredicateVisitor visitor);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ExtendedResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ExtendedResourceProvider.java
index 134ff2c..5d30226 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ExtendedResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ExtendedResourceProvider.java
@@ -41,7 +41,7 @@
    * @throws NoSuchResourceException the requested resource instance doesn't exist
    * @throws NoSuchParentResourceException a parent resource of the requested resource doesn't exist
    */
-  public QueryResponse queryForResources(Request request, Predicate predicate)
+  QueryResponse queryForResources(Request request, Predicate predicate)
       throws SystemException,
       UnsupportedPropertyException,
       NoSuchResourceException,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageRequest.java
index bb30ac2..c225d4d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageRequest.java
@@ -28,14 +28,14 @@
    *
    * @return the starting point
    */
-  public StartingPoint getStartingPoint();
+  StartingPoint getStartingPoint();
 
   /**
    * Get the desired page size.
    *
    * @return the page size; -1 means from the starting point to the end of the resource set
    */
-  public int getPageSize();
+  int getPageSize();
 
   /**
    * Get the offset (zero based) of the resource that should be used as the start or end
@@ -43,7 +43,7 @@
    *
    * @return the offset
    */
-  public int getOffset();
+  int getOffset();
 
   /**
    * Return the predicate that identifies the single resource to be used
@@ -51,12 +51,12 @@
    *
    * @return the associated predicate
    */
-  public Predicate getPredicate();
+  Predicate getPredicate();
 
   /**
    * The desired starting point of the page being requested.
    */
-  public enum StartingPoint {
+  enum StartingPoint {
     Beginning,      // start the page from the beginning of the resource set
     End,            // end the page at the end of the resource set
     OffsetStart,    // start the page from the associated offset point
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageResponse.java
index 9e4d6ab..3bc5b58 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PageResponse.java
@@ -27,32 +27,32 @@
    *
    * @return the iterable set of resources
    */
-  public Iterable<Resource> getIterable();
+  Iterable<Resource> getIterable();
 
   /**
    * Get the offset of the first resource of the page.
    *
    * @return the offset
    */
-  public int getOffset();
+  int getOffset();
 
   /**
    * Get the last resource before this page.
    *
    * @return the last resource before this page; null if this is the first page
    */
-  public Resource getPreviousResource();
+  Resource getPreviousResource();
 
   /**
    * Get the next resource after this page.
    *
    * @return the next resource after this page; null if this is the last page
    */
-  public Resource getNextResource();
+  Resource getNextResource();
 
   /**
   * Get the count of total resources without accounting for the paging request.
    * @return total count
    */
-  public Integer getTotalResourceCount();
+  Integer getTotalResourceCount();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java
index ae24b06..52b668f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java
@@ -29,5 +29,5 @@
    * @param resource the resource to evaluate the predicate against
    * @return the result of applying the predicate to the given resource
    */
-  public boolean evaluate(Resource resource);
+  boolean evaluate(Resource resource);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java
index 6829c1b..1b72b24 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java
@@ -40,7 +40,7 @@
    *
    * @throws SystemException thrown if resources cannot be populated
    */
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
+  Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
       throws SystemException;
 
   /**
@@ -51,5 +51,5 @@
    *         supported by this resource provider.  An empty return set indicates
    *         that all of the given property ids are supported.
    */
-  public Set<String> checkPropertyIds(Set<String> propertyIds);
+  Set<String> checkPropertyIds(Set<String> propertyIds);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
index 33dfc24..27c60a2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
@@ -31,7 +31,7 @@
    *
    * @return the resource adapter
    */
-  public ResourceProvider getResourceProvider(Resource.Type type);
+  ResourceProvider getResourceProvider(Resource.Type type);
 
   /**
    * Get the list of property providers for the given resource type.
@@ -40,5 +40,5 @@
    *
    * @return the list of property providers
    */
-  public List<PropertyProvider> getPropertyProviders(Resource.Type type);
+  List<PropertyProvider> getPropertyProviders(Resource.Type type);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/QueryResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/QueryResponse.java
index 66bd3b0..9ce8559 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/QueryResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/QueryResponse.java
@@ -31,21 +31,21 @@
    *
    * @return the set of resources
    */
-  public Set<Resource> getResources();
+  Set<Resource> getResources();
 
   /**
    * Determine whether or not the response is sorted.
    *
    * @return {@code true} if the response is sorted
    */
-  public boolean isSortedResponse();
+  boolean isSortedResponse();
 
   /**
    * Determine whether or not the response is paginated.
    *
    * @return {@code true} if the response is paginated
    */
-  public boolean isPagedResponse();
+  boolean isPagedResponse();
 
   /**
   * Get the total number of resources for the query result.
@@ -54,5 +54,5 @@
    *
   * @return the total number of resources in the query result
    */
-  public int getTotalResourceCount();
+  int getTotalResourceCount();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java
index 0ebf93a..6c80273 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java
@@ -31,28 +31,28 @@
    *
    * @return the set of resources
    */
-  public Set<Resource> getAssociatedResources();
+  Set<Resource> getAssociatedResources();
 
   /**
    * Get the resource of type request for the asynchronous request.
    *
    * @return the request resource
    */
-  public Resource getRequestResource();
+  Resource getRequestResource();
 
   /**
    * Get the status of the operation initiated by the request.
    *
    * @return the status
    */
-  public Status getStatus();
+  Status getStatus();
 
   RequestStatusMetaData getStatusMetadata();
 
   /**
    * Request status.
    */
-  public enum Status {
+  enum Status {
     Accepted,
     InProgress,
     Complete
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourcePredicateEvaluator.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourcePredicateEvaluator.java
index 425ef45..abd933a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourcePredicateEvaluator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourcePredicateEvaluator.java
@@ -26,5 +26,5 @@
    *
    * @return true if the predicate evaluates to true for the resource
    */
-  public boolean evaluate(Predicate predicate, Resource resource);
+  boolean evaluate(Predicate predicate, Resource resource);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java
index bcb3291..c97df46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java
@@ -39,7 +39,7 @@
    * @param type the resource type
    * @return the key property id for the given resource type
    */
-  public String getKeyPropertyId(Resource.Type type);
+  String getKeyPropertyId(Resource.Type type);
 
   /**
    * Get the set of resource types used in the key that uniquely identifies
@@ -52,5 +52,5 @@
    * @return the set of resource types used in the key that uniquely identifies
    *         the resource type described by this schema.
    */
-  public Set<Resource.Type> getKeyTypes();
+  Set<Resource.Type> getKeyTypes();
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SchemaFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SchemaFactory.java
index 290b508..d591630 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SchemaFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SchemaFactory.java
@@ -28,5 +28,5 @@
    * @param type  resource type
    * @return schema instance for the specified type
    */
-  public Schema getSchema(Resource.Type type);
+  Schema getSchema(Resource.Type type);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SortRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SortRequest.java
index 9f7f6ab..e2bbd51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SortRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SortRequest.java
@@ -27,17 +27,17 @@
   /**
    * Get a list of @SortRequestProperty.
    */
-  public List<SortRequestProperty> getProperties();
+  List<SortRequestProperty> getProperties();
 
   /**
    * Get a list of propertyIds
    */
-  public List<String> getPropertyIds();
+  List<String> getPropertyIds();
 
   /**
    * Enumeration for order of sorting
    */
-  public enum Order {
+  enum Order {
     ASC,
     DESC
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java
index 098393f..350c4da 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java
@@ -25,6 +25,6 @@
  * A provider of input stream from a property source.
  */
 public interface StreamProvider {
-  public InputStream readFrom(String spec) throws IOException;
-  public InputStream readFrom(String spec, String requestMethod, String params) throws IOException;
+  InputStream readFrom(String spec) throws IOException;
+  InputStream readFrom(String spec, String requestMethod, String params) throws IOException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
index a3c6e68..b91253c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/AlertDefinitionDisabledEvent.java
@@ -35,7 +35,7 @@
    *
    * @param clusterId
    *          the ID of the cluster that the definition is in.
-   * @param definition
+   * @param definitionId
    *          the alert definition being registered.
    */
   public AlertDefinitionDisabledEvent(long clusterId, long definitionId) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
index fe472b2..9c910f7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/jpa/EntityManagerCacheInvalidationEvent.java
@@ -23,11 +23,6 @@
  */
 public final class EntityManagerCacheInvalidationEvent extends JPAEvent {
 
-  /**
-   * Constructor.
-   *
-   * @param eventType
-   */
   public EntityManagerCacheInvalidationEvent() {
     super(JPAEventType.CACHE_INVALIDATION);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertMaintenanceModeListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertMaintenanceModeListener.java
index 847a207..1197589 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertMaintenanceModeListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertMaintenanceModeListener.java
@@ -20,17 +20,22 @@
 import java.util.List;
 
 import org.apache.ambari.server.EagerSingleton;
+import org.apache.ambari.server.events.AggregateAlertRecalculateEvent;
+import org.apache.ambari.server.events.AlertEvent;
 import org.apache.ambari.server.events.MaintenanceModeEvent;
+import org.apache.ambari.server.events.publishers.AlertEventPublisher;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.AlertsDAO;
 import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
 import org.apache.ambari.server.orm.entities.AlertNoticeEntity;
+import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.alert.AggregateDefinitionMapping;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,12 +57,26 @@
   private static Logger LOG = LoggerFactory.getLogger(AlertMaintenanceModeListener.class);
 
   /**
+   * Publishes {@link AlertEvent} instances.
+   */
+  @Inject
+  private AlertEventPublisher m_alertEventPublisher;
+
+  /**
    * Used for updating the MM of current alerts.
    */
   @Inject
   private AlertsDAO m_alertsDao = null;
 
   /**
+   * Used for quick lookups of aggregate alerts.
+   */
+  @Inject
+  private AggregateDefinitionMapping m_aggregateMapping;
+
+  private long clusterId = -1;
+
+  /**
    * Constructor.
    *
    * @param publisher
@@ -81,6 +100,7 @@
   public void onEvent(MaintenanceModeEvent event) {
     LOG.debug("Received event {}", event);
 
+    boolean recalculateAggregateAlert = false;
     List<AlertCurrentEntity> currentAlerts = m_alertsDao.findCurrent();
 
     MaintenanceState newMaintenanceState = MaintenanceState.OFF;
@@ -104,14 +124,16 @@
         if( null != host ){
           String hostName = host.getHostName();
           if( hostName.equals( alertHostName ) ){
-            updateMaintenanceState(currentAlert, newMaintenanceState);
+            if (updateMaintenanceStateAndRecalculateAggregateAlert(history, currentAlert, newMaintenanceState))
+              recalculateAggregateAlert = true;
             continue;
           }
         } else if( null != service ){
           // service level maintenance
           String serviceName = service.getName();
           if( serviceName.equals(alertServiceName)){
-            updateMaintenanceState(currentAlert, newMaintenanceState);
+            if (updateMaintenanceStateAndRecalculateAggregateAlert(history, currentAlert, newMaintenanceState))
+              recalculateAggregateAlert = true;
             continue;
           }
         } else if( null != serviceComponentHost ){
@@ -123,7 +145,8 @@
           // match on all 3 for a service component
           if (hostName.equals(alertHostName) && serviceName.equals(alertServiceName)
               && componentName.equals(alertComponentName)) {
-            updateMaintenanceState(currentAlert, newMaintenanceState);
+            if (updateMaintenanceStateAndRecalculateAggregateAlert(history, currentAlert, newMaintenanceState))
+              recalculateAggregateAlert = true;
             continue;
           }
         }
@@ -133,35 +156,56 @@
             definition.getDefinitionName(), alertHostName, exception);
       }
     }
+
+    if (recalculateAggregateAlert) {
+      // publish the event to recalculate aggregates
+      m_alertEventPublisher.publish(new AggregateAlertRecalculateEvent(clusterId));
+    }
   }
 
   /**
    * Updates the maintenance state of the specified alert if different than the
-   * supplied maintenance state.
+   * supplied maintenance state. Also requests a recalculation of aggregates
+   * when the maintenance state changed, the alert is in CRITICAL or WARNING
+   * state, and an aggregate alert is defined for it.
    *
+   * @param historyAlert
+   *          the alert to check for an associated aggregate alert.
    * @param currentAlert
    *          the alert to update (not {@code null}).
    * @param maintenanceState
    *          the maintenance state to change to, either
    *          {@link MaintenanceState#OFF} or {@link MaintenanceState#ON}.
    */
-  private void updateMaintenanceState(AlertCurrentEntity currentAlert,
-      MaintenanceState maintenanceState) {
+  private boolean updateMaintenanceStateAndRecalculateAggregateAlert(AlertHistoryEntity historyAlert,
+      AlertCurrentEntity currentAlert, MaintenanceState maintenanceState) {
 
     // alerts only care about OFF or ON
     if (maintenanceState != MaintenanceState.OFF && maintenanceState != MaintenanceState.ON) {
       LOG.warn("Unable to set invalid maintenance state of {} on the alert {}", maintenanceState,
           currentAlert.getAlertHistory().getAlertDefinition().getDefinitionName());
 
-      return;
+      return false;
     }
 
     MaintenanceState currentState = currentAlert.getMaintenanceState();
     if (currentState == maintenanceState) {
-      return;
+      return false;
     }
 
     currentAlert.setMaintenanceState(maintenanceState);
     m_alertsDao.merge(currentAlert);
+
+    AlertState alertState = historyAlert.getAlertState();
+
+    if (AlertState.RECALCULATE_AGGREGATE_ALERT_STATES.contains(alertState)){
+      clusterId = historyAlert.getClusterId();
+      String alertName = historyAlert.getAlertDefinition().getDefinitionName();
+
+      if (m_aggregateMapping.getAggregateDefinition(clusterId, alertName) != null){
+        return true;
+      }
+    }
+    return false;
   }
 }
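
The publisher wiring above rests on Guava's event bus; a minimal standalone sketch
of the publish/subscribe pattern (the event and listener classes here are
illustrative stand-ins, not Ambari's):

    import com.google.common.eventbus.EventBus;
    import com.google.common.eventbus.Subscribe;

    public class EventBusSketch {
      // stands in for AggregateAlertRecalculateEvent
      static class RecalculateEvent {
        final long clusterId;
        RecalculateEvent(long clusterId) { this.clusterId = clusterId; }
      }

      static class Listener {
        @Subscribe
        public void onEvent(RecalculateEvent event) {
          System.out.println("recalculating aggregates for cluster " + event.clusterId);
        }
      }

      public static void main(String[] args) {
        EventBus bus = new EventBus();
        bus.register(new Listener());
        bus.post(new RecalculateEvent(1L));  // analogous to m_alertEventPublisher.publish(...)
      }
    }
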
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
index 907e4d8..2faadba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/alerts/AlertReceivedListener.java
@@ -588,7 +588,7 @@
    *          the definition to read any repeat tolerance overrides from.
    * @param state
    *          the state of the {@link AlertCurrentEntity}.
-   * @param the
+   * @param occurrences
    *          occurrences of the alert in the current state (used for
    *          calculation firmness when moving between non-OK states)
    * @return
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
index 22d7f2e..abf8e6b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
@@ -17,9 +17,6 @@
  */
 package org.apache.ambari.server.events.listeners.upgrade;
 
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.EagerSingleton;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -37,7 +34,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.eventbus.AllowConcurrentEvents;
 import com.google.common.eventbus.Subscribe;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
@@ -60,12 +56,6 @@
   private final static Logger LOG = LoggerFactory.getLogger(StackVersionListener.class);
   public static final String UNKNOWN_VERSION = State.UNKNOWN.toString();
 
-  /**
-   * Used to prevent multiple threads from trying to create host alerts
-   * simultaneously.
-   */
-  private Lock m_stackVersionLock = new ReentrantLock();
-
   @Inject
   private RepositoryVersionDAO repositoryVersionDAO;
 
@@ -83,7 +73,6 @@
   }
 
   @Subscribe
-  @AllowConcurrentEvents
   public void onAmbariEvent(HostComponentVersionAdvertisedEvent event) {
     LOG.debug("Received event {}", event);
 
@@ -96,8 +85,6 @@
       return;
     }
 
-    m_stackVersionLock.lock();
-
     // if the cluster is upgrading, there's no need to update the repo version -
     // it better be right
     if (null != event.getRepositoryVersionId() && null == cluster.getUpgradeInProgress()) {
@@ -120,6 +107,7 @@
       ComponentInfo componentInfo = metaInfo.getComponent(cluster.getDesiredStackVersion().getStackName(),
       cluster.getDesiredStackVersion().getStackVersion(), sch.getServiceName(), sch.getServiceComponentName());
       ServiceComponent sc = cluster.getService(sch.getServiceName()).getServiceComponent(sch.getServiceComponentName());
+
       if (componentInfo.isVersionAdvertised() && StringUtils.isNotBlank(newVersion)
           && !UNKNOWN_VERSION.equalsIgnoreCase(newVersion)) {
         processComponentAdvertisedVersion(cluster, sch, newVersion, sc);
@@ -135,12 +123,11 @@
           processComponentAdvertisedVersion(cluster, sch, newVersion, sc);
         }
       }
+
     } catch (Exception e) {
       LOG.error(
           "Unable to propagate version for ServiceHostComponent on component: {}, host: {}. Error: {}",
           sch.getServiceComponentName(), sch.getHostName(), e.getMessage());
-    } finally {
-      m_stackVersionLock.unlock();
     }
   }
 
@@ -158,6 +145,7 @@
     if (StringUtils.isBlank(newVersion)) {
       return;
     }
+
     String previousVersion = sch.getVersion();
     if (previousVersion == null || UNKNOWN_VERSION.equalsIgnoreCase(previousVersion)) {
       // value may be "UNKNOWN" when upgrading from older Ambari versions
@@ -168,6 +156,8 @@
     } else if (!StringUtils.equals(previousVersion, newVersion)) {
       processComponentVersionChange(cluster, sc, sch, newVersion);
     }
+
+    sc.updateRepositoryState(newVersion);
   }
 
   /**
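
Dropping @AllowConcurrentEvents above is what makes the explicit lock removable:
without that annotation, Guava's EventBus serializes invocations of a given
subscriber method, so onAmbariEvent can no longer race against itself. In sketch
form:

    import com.google.common.eventbus.AllowConcurrentEvents;
    import com.google.common.eventbus.Subscribe;

    public class ConcurrencySketch {
      @Subscribe                  // delivered one at a time; the bus synchronizes the call
      public void serialized(Object event) { }

      @Subscribe
      @AllowConcurrentEvents      // may be invoked concurrently from multiple dispatch threads
      public void concurrent(Object event) { }
    }
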
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java b/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
index 69463ab..149e2f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/hooks/users/UserHookService.java
@@ -139,7 +139,7 @@
       String stageContextText = String.format(POST_USER_CREATION_REQUEST_CONTEXT, ctx.getUserGroups().size());
 
       Stage stage = stageFactory.createNew(requestStageContainer.getId(), configuration.getServerTempDir() + File.pathSeparatorChar + requestStageContainer.getId(), clsData.getClusterName(),
-          clsData.getClusterId(), stageContextText, "{}", "{}", "{}");
+          clsData.getClusterId(), stageContextText, "{}", "{}");
       stage.setStageId(requestStageContainer.getLastStageId());
 
       ServiceComponentHostServerActionEvent serverActionEvent = new ServiceComponentHostServerActionEvent("ambari-server-host", System.currentTimeMillis());
@@ -148,6 +148,7 @@
       stage.addServerActionCommand(PostUserCreationHookServerAction.class.getName(), "ambari", Role.AMBARI_SERVER_ACTION,
           RoleCommand.EXECUTE, clsData.getClusterName(), serverActionEvent, commandParams, stageContextText, null, null, false, false);
 
+      requestStageContainer.setClusterHostInfo("{}");
       requestStageContainer.addStages(Collections.singletonList(stage));
       requestStageContainer.persist();
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
index f268149..83e422c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
@@ -20,6 +20,7 @@
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
@@ -53,7 +54,6 @@
 import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-
 import org.apache.hadoop.metrics2.sink.timeline.cache.TimelineMetricsCache;
 import org.springframework.security.core.context.SecurityContextHolder;
 
@@ -71,6 +71,8 @@
   private AmbariManagementController ambariManagementController;
   private TimelineMetricsCache timelineMetricsCache;
   private boolean isInitialized = false;
+  private boolean setInstanceId = false;
+  private String instanceId;
 
   public AmbariMetricSinkImpl(AmbariManagementController amc) {
     this.ambariManagementController = amc;
@@ -98,28 +100,30 @@
 
     for (Map.Entry<String, Cluster> kv : clusters.getClusters().entrySet()) {
       String clusterName = kv.getKey();
+      instanceId = clusterName;
       Cluster c = kv.getValue();
       Resource.Type type = Resource.Type.ServiceConfigVersion;
 
       //If Metrics Collector VIP settings are configured, use that.
-      boolean vipHostConfigPresent = false;
-      boolean vipPortConfigPresent = false;
+      boolean externalHostConfigPresent = false;
+      boolean externalPortConfigPresent = false;
       Config clusterEnv = c.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
       if (clusterEnv != null) {
         Map<String, String> configs = clusterEnv.getProperties();
 
-        String metricsCollectorVipHost = configs.get("metrics_collector_vip_host");
-        if (StringUtils.isNotEmpty(metricsCollectorVipHost)) {
-          LOG.info("Setting Metrics Collector Vip Host : " + metricsCollectorVipHost);
-          collectorHosts.add(metricsCollectorVipHost);
-          vipHostConfigPresent = true;
+        String metricsCollectorExternalHosts = configs.get("metrics_collector_external_hosts");
+        if (StringUtils.isNotEmpty(metricsCollectorExternalHosts)) {
+          LOG.info("Setting Metrics Collector External Host : " + metricsCollectorExternalHosts);
+          collectorHosts.addAll(Arrays.asList(metricsCollectorExternalHosts.split(",")));
+          externalHostConfigPresent = true;
+          setInstanceId = true;
         }
 
-        String metricsCollectorVipPort = configs.get("metrics_collector_vip_port");
-        if (StringUtils.isNotEmpty(metricsCollectorVipPort)) {
-          LOG.info("Setting Metrics Collector Vip Port : " + metricsCollectorVipPort);
-          port = metricsCollectorVipPort;
-          vipPortConfigPresent = true;
+        String metricsCollectorExternalPort = configs.get("metrics_collector_external_port");
+        if (StringUtils.isNotEmpty(metricsCollectorExternalPort)) {
+          LOG.info("Setting Metrics Collector External Port : " + metricsCollectorExternalPort);
+          port = metricsCollectorExternalPort;
+          externalPortConfigPresent = true;
         }
       }
 
@@ -140,7 +144,7 @@
         ambariManagementController);
 
       try {
-        if ( !vipHostConfigPresent ) {
+        if ( !externalHostConfigPresent ) {
           //get collector host(s)
           Service service = c.getService(ambariMetricsServiceName);
           if (service != null) {
@@ -166,7 +170,7 @@
               if (config != null && config.get("type").equals("ams-site")) {
                 TreeMap<Object, Object> properties = (TreeMap<Object, Object>) config.get("properties");
                 String timelineWebappAddress = (String) properties.get("timeline.metrics.service.webapp.address");
-                if (!vipPortConfigPresent && StringUtils.isNotEmpty(timelineWebappAddress) && timelineWebappAddress.contains(":")) {
+                if (!externalPortConfigPresent && StringUtils.isNotEmpty(timelineWebappAddress) && timelineWebappAddress.contains(":")) {
                   port = timelineWebappAddress.split(":")[1];
                 }
                 String httpPolicy = (String) properties.get("timeline.metrics.service.http.policy");
@@ -295,6 +299,16 @@
     return hostName;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
   private List<TimelineMetric> getFilteredMetricList(List<SingleMetric> metrics) {
     final List<TimelineMetric> metricList = new ArrayList<>();
     for (SingleMetric metric : metrics) {
@@ -318,6 +332,9 @@
     TimelineMetric timelineMetric = new TimelineMetric();
     timelineMetric.setMetricName(attributeName);
     timelineMetric.setHostName(hostName);
+    if (setInstanceId) {
+      timelineMetric.setInstanceId(instanceId);
+    }
     timelineMetric.setAppId(component);
     timelineMetric.setStartTime(currentTimeMillis);
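
A condensed sketch of the two behaviors introduced above: splitting the external
collector host list and stamping metrics with the cluster name as instance id
(the configuration and metric values are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;

    public class SinkSketch {
      public static void main(String[] args) {
        // value of metrics_collector_external_hosts (illustrative)
        String external = "collector1.example.com,collector2.example.com";
        Collection<String> collectorHosts = new ArrayList<>(Arrays.asList(external.split(",")));
        boolean setInstanceId = !collectorHosts.isEmpty();  // external collectors need an instance id

        TimelineMetric metric = new TimelineMetric();
        metric.setMetricName("jvm.memory.heap.used");       // illustrative metric name
        metric.setHostName("ambari-server.example.com");
        if (setInstanceId) {
          metric.setInstanceId("c1");                       // the cluster name, as above
        }
        metric.setAppId("ambari_server");
        metric.setStartTime(System.currentTimeMillis());
      }
    }
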
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/notifications/DispatchCallback.java b/ambari-server/src/main/java/org/apache/ambari/server/notifications/DispatchCallback.java
index e4e944d..b9861b4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/notifications/DispatchCallback.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/notifications/DispatchCallback.java
@@ -36,7 +36,7 @@
    *          a list of unique IDs that the caller can use to identify the
    *          {@link Notification} that was dispatched.
    */
-  public void onSuccess(List<String> callbackIds);
+  void onSuccess(List<String> callbackIds);
 
   /**
    * Invoked when a {@link Notification} failed to be dispatched.
@@ -45,6 +45,6 @@
    *          a list of unique IDs that the caller can use to identify the
    *          {@link Notification} that was dispatched.
    */
-  public void onFailure(List<String> callbackIds);
+  void onFailure(List<String> callbackIds);
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
index c132a3d..4f29d61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessor.java
@@ -531,9 +531,8 @@
    *          the name of the table (not {@code null}).
    * @param columnName
    *          the name of the column to retrieve type for (not {@code null}).
-   * @return the integer representation of the column type from {@link Types}.
+   * @return the integer representation of the column type
    * @throws SQLException
-   * @see {@link Types}
    */
   int getColumnType(String tableName, String columnName)
       throws SQLException;
@@ -623,6 +622,30 @@
    */
   void addDefaultConstraint(String tableName, DBColumnInfo column) throws SQLException;
 
+  /**
+   * Moves column data from {@code sourceTableName} to {@code targetTableName}, using the {@code sourceIDFieldName}
+   * and {@code targetIDFieldName} keys to match the corresponding rows.
+   *
+   * @param sourceTableName
+   *          the source table name
+   * @param sourceColumn
+   *          the source column name
+   * @param sourceIDFieldName
+   *          the source id key field name, matched with {@code targetIDFieldName}
+   * @param targetTableName
+   *          the target table name
+   * @param targetColumn
+   *          the target column name
+   * @param targetIDFieldName
+   *          the target id key field name, matched with {@code sourceIDFieldName}
+   * @param isColumnNullable
+   *          whether the target column should be nullable
+   *
+   * @throws SQLException
+   */
+  void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
+       String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, boolean isColumnNullable) throws SQLException;
+
   enum DbType {
     ORACLE,
     MYSQL,
@@ -639,6 +662,12 @@
   DbType getDbType();
 
   /**
+   * Get the database schema name
+   * @return the database schema name
+   */
+  String getDbSchema();
+
+  /**
    * Capture column type
    */
   class DBColumnInfo {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 1dd3b54..9c6425c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -233,6 +233,11 @@
   }
 
   @Override
+  public String getDbSchema() {
+    return dbSchema;
+  }
+
+  @Override
   public boolean tableHasData(String tableName) throws SQLException {
     String query = "SELECT count(*) from " + tableName;
     Statement statement = getConnection().createStatement();
@@ -1299,4 +1304,48 @@
 
     return valueString;
   }
+
+  /**
+   * Moves column data from {@code sourceTableName} to {@code targetTableName}, using the {@code sourceIDFieldName}
+   * and {@code targetIDFieldName} keys to match the corresponding rows.
+   *
+   * @param sourceTableName
+   *          the source table name
+   * @param sourceColumn
+   *          the source column name
+   * @param sourceIDFieldName
+   *          the source id key field name, matched with {@code targetIDFieldName}
+   * @param targetTableName
+   *          the target table name
+   * @param targetColumn
+   *          the target column name
+   * @param targetIDFieldName
+   *          the target id key field name, matched with {@code sourceIDFieldName}
+   * @param isColumnNullable
+   *          whether the target column should be nullable
+   *
+   * @throws SQLException
+   */
+  @Override
+  public void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
+              String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, boolean isColumnNullable) throws SQLException {
+
+    if (this.tableHasColumn(sourceTableName, sourceIDFieldName)) {
+
+      final String moveSQL = dbmsHelper.getCopyColumnToAnotherTableStatement(sourceTableName, sourceColumn.getName(),
+        sourceIDFieldName, targetTableName, targetColumn.getName(), targetIDFieldName);
+
+      targetColumn.setNullable(true);  // create the column as nullable first so existing rows can be populated
+
+      this.addColumn(targetTableName, targetColumn);
+      this.executeUpdate(moveSQL, false);
+
+      if (!isColumnNullable) {
+        // this will trigger an exception if some record is null
+        // ToDo: add default option
+        this.setColumnNullable(targetTableName, targetColumn.getName(), false);
+      }
+      this.dropColumn(sourceTableName, sourceColumn.getName());
+    }
+  }
 }
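
An upgrade-catalog style usage sketch for the new helper. Table, column and key
names here are hypothetical, and the five-argument DBColumnInfo constructor
(name, type, length, default value, nullable) is assumed from its typical usage:

    import java.sql.SQLException;
    import org.apache.ambari.server.orm.DBAccessor;
    import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;

    public class MoveColumnSketch {
      static void example(DBAccessor dbAccessor) throws SQLException {
        // hypothetical column definitions; constructor shape is an assumption
        DBColumnInfo sourceColumn = new DBColumnInfo("host_id", Long.class, null, null, true);
        DBColumnInfo targetColumn = new DBColumnInfo("host_id", Long.class, null, null, true);
        dbAccessor.moveColumnToAnotherTable(
            "source_table", sourceColumn, "join_id",  // copy source_table.host_id, keyed on join_id
            "target_table", targetColumn, "id",       // into target_table.host_id, keyed on id
            false);                                   // then tighten the new column to NOT NULL
      }
    }
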
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/JPATableGenerationStrategy.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/JPATableGenerationStrategy.java
index 9316c10..ccc8db8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/JPATableGenerationStrategy.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/JPATableGenerationStrategy.java
@@ -30,7 +30,7 @@
 
   private String value;
 
-  private JPATableGenerationStrategy(String value) {
+  JPATableGenerationStrategy(String value) {
     this.value = value;
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/ConfigGroupHostMapping.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/ConfigGroupHostMapping.java
index ae3f076..16af596 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/ConfigGroupHostMapping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/ConfigGroupHostMapping.java
@@ -23,13 +23,13 @@
 
 public interface ConfigGroupHostMapping {
   
-  public Long getConfigGroupId();
-  public Long getHostId();
-  public Host getHost();
-  public ConfigGroup getConfigGroup();
+  Long getConfigGroupId();
+  Long getHostId();
+  Host getHost();
+  ConfigGroup getConfigGroup();
   
-  public void setConfigGroupId(Long configGroupId);
-  public void setHostId(Long setHostId);
-  public void setHost(Host host);
-  public void setConfigGroup(ConfigGroup configGroup);
+  void setConfigGroupId(Long configGroupId);
+  void setHostId(Long setHostId);
+  void setHost(Host host);
+  void setConfigGroup(ConfigGroup configGroup);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/HostConfigMapping.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/HostConfigMapping.java
index d5afc15..27f2da4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/HostConfigMapping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/cache/HostConfigMapping.java
@@ -19,27 +19,27 @@
 
 public interface HostConfigMapping {
   
-  public Long getClusterId();
-  public void setClusterId(Long clusterId);
+  Long getClusterId();
+  void setClusterId(Long clusterId);
 
-  public Long getHostId();
-  public void setHostId(Long hostId);
+  Long getHostId();
+  void setHostId(Long hostId);
   
-  public String getType();
-  public void setType(String type);
+  String getType();
+  void setType(String type);
   
-  public Long getCreateTimestamp();
-  public void setCreateTimestamp(Long createTimestamp);
+  Long getCreateTimestamp();
+  void setCreateTimestamp(Long createTimestamp);
   
-  public String getVersion();
-  public void setVersion(String version);
+  String getVersion();
+  void setVersion(String version);
   
-  public String getServiceName();
-  public void setServiceName(String serviceName);
+  String getServiceName();
+  void setServiceName(String serviceName);
   
-  public String getUser();
-  public void setUser(String user);
+  String getUser();
+  void setUser(String user);
   
-  public Integer getSelected();
-  public void setSelected(Integer selected);
+  Integer getSelected();
+  void setSelected(Integer selected);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
index 6c6c3ae..4b428d1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionDAO.java
@@ -135,7 +135,7 @@
 
   /**
    * Creates or updates the specified entity. This method will check
-   * {@link ExtensionEntity#getStackId()} in order to determine whether the entity
+   * {@link ExtensionEntity#getExtensionId()} in order to determine whether the entity
    * should be created or merged.
    *
    * @param extension
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
index e860c64..7bfa544 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
@@ -72,16 +72,19 @@
     }
 
     String stackName = request.getStackName();
-    String stackVersion = request.getStackName();
-    String extensionName = request.getStackName();
-    String extensionVersion = request.getStackName();
+    String stackVersion = request.getStackVersion();
+    String extensionName = request.getExtensionName();
+    String extensionVersion = request.getExtensionVersion();
 
     if (stackName != null && stackVersion != null) {
-      if (extensionName != null && extensionVersion != null) {
-        ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
-        List<ExtensionLinkEntity> list = new ArrayList<>();
-        list.add(entity);
-        return list;
+      if (extensionName != null) {
+        if (extensionVersion != null) {
+          ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
+          List<ExtensionLinkEntity> list = new ArrayList<>();
+          list.add(entity);
+          return list;
+        }
+        return findByStackAndExtensionName(stackName, stackVersion, extensionName);
       }
       return findByStack(stackName, stackVersion);
     }
@@ -151,6 +154,23 @@
   }
 
   /**
+   * Gets the extension links that match the specified stack name, stack version and extension name.
+   *
+   * @return the extension links matching the specified stack name, stack version and extension name, if any.
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> findByStackAndExtensionName(String stackName, String stackVersion, String extensionName) {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findByStackAndExtensionName", ExtensionLinkEntity.class);
+
+    query.setParameter("stackName", stackName);
+    query.setParameter("stackVersion", stackVersion);
+    query.setParameter("extensionName", extensionName);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
    * Gets the extension link that match the specified stack name, stack version, extension name and extension version.
    *
    * @return the extension link matching the specified stack name, stack version, extension name and extension version if any.
@@ -209,8 +229,7 @@
    * {@link ExtensionLinkEntity#getLinkId()} in order to determine whether the entity
    * should be created or merged.
    *
-   * @param extension
-   *          the link to create or update (not {@code null}).
+   * @param link the link to create or update (not {@code null}).
    */
   public void createOrUpdate(ExtensionLinkEntity link)
       throws AmbariException {
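
The new findByStackAndExtensionName lookup expects a matching named query on
ExtensionLinkEntity; a plausible JPQL shape is sketched below. This is an
assumption: the real definition lives on the entity class and its field names
may differ.

    @NamedQuery(
        name = "ExtensionLinkEntity.findByStackAndExtensionName",
        query = "SELECT link FROM ExtensionLinkEntity link"
              + " WHERE link.stack.stackName = :stackName"
              + " AND link.stack.stackVersion = :stackVersion"
              + " AND link.extension.extensionName = :extensionName")
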
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
index cc7b503..6174912 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
@@ -207,4 +207,24 @@
 
     em.clear();
   }
+
+  /**
+   * @param serviceName
+   * @param componentName
+   * @param version
+   * @return a list of host components whose version does NOT match the given version
+   */
+  @RequiresSession
+  public List<HostComponentStateEntity> findByServiceAndComponentAndNotVersion(String serviceName,
+      String componentName, String version) {
+
+    final TypedQuery<HostComponentStateEntity> query = entityManagerProvider.get().createNamedQuery(
+        "HostComponentStateEntity.findByServiceAndComponentAndNotVersion", HostComponentStateEntity.class);
+
+    query.setParameter("serviceName", serviceName);
+    query.setParameter("componentName", componentName);
+    query.setParameter("version", version);
+
+    return daoUtils.selectList(query);
+  }
 }
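
Likewise, the DAO method above relies on a named query declared on
HostComponentStateEntity; a plausible JPQL shape (an assumption, the entity
field names may differ):

    @NamedQuery(
        name = "HostComponentStateEntity.findByServiceAndComponentAndNotVersion",
        query = "SELECT hcs FROM HostComponentStateEntity hcs"
              + " WHERE hcs.serviceName = :serviceName"
              + " AND hcs.componentName = :componentName"
              + " AND hcs.version != :version")
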
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
index 7318162..58a4180 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
@@ -737,7 +737,7 @@
   @TransactionalLock(lockArea = LockArea.HRC_STATUS_CACHE, lockType = LockType.WRITE)
   public void remove(HostRoleCommandEntity entity) {
     EntityManager entityManager = entityManagerProvider.get();
-    entityManager.remove(merge(entity));
+    entityManager.remove(entity);
     invalidateHostRoleCommandStatusSummaryCache(entity);
   }
 
@@ -993,4 +993,20 @@
       return HostRoleCommandEntity_.getPredicateMapping().get(propertyId);
     }
   }
+
+  public List<Long> findTaskIdsByRequestStageIds(List<RequestDAO.StageEntityPK> requestStageIds) {
+    EntityManager entityManager = entityManagerProvider.get();
+    List<Long> taskIds = new ArrayList<>();
+    for (RequestDAO.StageEntityPK requestIds : requestStageIds) {
+      TypedQuery<Long> hostRoleCommandQuery =
+              entityManager.createNamedQuery("HostRoleCommandEntity.findTaskIdsByRequestStageIds", Long.class);
+
+      hostRoleCommandQuery.setParameter("requestId", requestIds.getRequestId());
+      hostRoleCommandQuery.setParameter("stageId", requestIds.getStageId());
+
+      taskIds.addAll(daoUtils.selectList(hostRoleCommandQuery));
+    }
+
+    return taskIds;
+  }
 }
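
A usage sketch for the new lookup, paired with the RequestDAO.StageEntityPK key
class introduced later in this change (the ids are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
    import org.apache.ambari.server.orm.dao.RequestDAO;

    public class TaskIdLookupSketch {
      static List<Long> example(HostRoleCommandDAO dao) {
        List<RequestDAO.StageEntityPK> stages = Arrays.asList(
            new RequestDAO.StageEntityPK(100L, 0L),   // request 100, stage 0
            new RequestDAO.StageEntityPK(100L, 1L));  // request 100, stage 1
        // issues one named query per request/stage pair and concatenates the task ids
        return dao.findTaskIdsByRequestStageIds(stages);
      }
    }
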
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
index 2696f66..8f16cb2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RequestDAO.java
@@ -19,27 +19,54 @@
 package org.apache.ambari.server.orm.dao;
 
 import java.text.MessageFormat;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 
 import javax.persistence.EntityManager;
 import javax.persistence.TypedQuery;
 
+import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.cleanup.TimeBasedCleanupPolicy;
 import org.apache.ambari.server.orm.RequiresSession;
+import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
+import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.RequestEntity;
+import org.apache.ambari.server.orm.entities.RequestOperationLevelEntity;
 import org.apache.ambari.server.orm.entities.RequestResourceFilterEntity;
+import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity;
+import org.apache.ambari.server.orm.entities.StageEntity;
+import org.apache.ambari.server.orm.entities.TopologyHostRequestEntity;
+import org.apache.ambari.server.orm.entities.TopologyHostTaskEntity;
+import org.apache.ambari.server.orm.entities.TopologyLogicalTaskEntity;
+import org.apache.ambari.server.state.Clusters;
 import org.eclipse.persistence.config.HintValues;
 import org.eclipse.persistence.config.QueryHints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.Singleton;
 import com.google.inject.persist.Transactional;
 
 @Singleton
-public class RequestDAO {
+public class RequestDAO implements Cleanable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RequestDAO.class);
+
+
+  private static final int BATCH_SIZE = 999; // keep generated IN lists under the 1000-expression limit of some databases (e.g. Oracle)
+
   /**
    * SQL template to retrieve all request IDs, sorted by the ID.
    */
@@ -64,6 +91,27 @@
   @Inject
   DaoUtils daoUtils;
 
+  @Inject
+  private Provider<Clusters> m_clusters;
+
+  @Inject
+  private HostRoleCommandDAO hostRoleCommandDAO;
+
+  @Inject
+  private StageDAO stageDAO;
+
+  @Inject
+  private TopologyLogicalTaskDAO topologyLogicalTaskDAO;
+
+  @Inject
+  private TopologyHostTaskDAO topologyHostTaskDAO;
+
+  @Inject
+  private TopologyLogicalRequestDAO topologyLogicalRequestDAO;
+
+  @Inject
+  private TopologyRequestDAO topologyRequestDAO;
+
   @RequiresSession
   public RequestEntity findByPK(Long requestId) {
     return entityManagerProvider.get().find(RequestEntity.class, requestId);
@@ -175,8 +223,8 @@
    * Retrieves from the database for a cluster, or specifically for non-cluster requests.
    * This method should be considered temporary until Request/Stage/Task cleanup is achieved.
    *
-   * @param maxResults  the max number to return
-   * @param ascOrder    {@code true} to sort by requestId ascending, {@code false} for descending
+   * @param limit the max number to return
+   * @param sortAscending {@code true} to sort by requestId ascending, {@code false} for descending
    * @param clusterId   the cluster to find, or {@code null} to search for requests without cluster
    */
   @RequiresSession
@@ -197,4 +245,214 @@
 
     return daoUtils.selectList(query);
   }
+
+  public static final class StageEntityPK {
+    private Long requestId;
+    private Long stageId;
+
+    public StageEntityPK(Long requestId, Long stageId) {
+      this.requestId = requestId;
+      this.stageId = stageId;
+    }
+
+    public Long getStageId() {
+      return stageId;
+    }
+
+    public void setStageId(Long stageId) {
+      this.stageId = stageId;
+    }
+
+    public Long getRequestId() {
+      return requestId;
+    }
+
+    public void setRequestId(Long requestId) {
+      this.requestId = requestId;
+    }
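+
+    // Added for log readability: the batch-delete logging below prints these pairs,
+    // and the default Object.toString would only show an identity hash.
+    @Override
+    public String toString() {
+      return "StageEntityPK{requestId=" + requestId + ", stageId=" + stageId + "}";
+    }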
+  }
+
+  /**
+   * Searches the Upgrade table for all request ids.
+   * @return the list of request ids
+   */
+  private List<Long> findAllRequestIdsFromUpgrade() {
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<Long> upgradeQuery =
+            entityManager.createNamedQuery("UpgradeEntity.findAllRequestIds", Long.class);
+
+    return daoUtils.selectList(upgradeQuery);
+  }
+
+  /**
+   * Searches the Request and Stage tables for all request/stage id pairs that belong
+   * to the given cluster and were created on or before the given date.
+   * @param clusterId        the cluster to search
+   * @param beforeDateMillis inclusive upper bound on the request create time, in milliseconds
+   * @return the list of request/stage id pairs
+   */
+  public List<StageEntityPK> findRequestAndStageIdsInClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<StageEntityPK> requestQuery =
+            entityManager.createNamedQuery("RequestEntity.findRequestStageIdsInClusterBeforeDate", StageEntityPK.class);
+
+    requestQuery.setParameter("clusterId", clusterId);
+    requestQuery.setParameter("beforeDate", beforeDateMillis);
+
+    return daoUtils.selectList(requestQuery);
+  }
+
+  /**
+   * Removes table rows matching the passed ids. To keep each query within database
+   * limits, the ids are deleted in batches of at most {@code BATCH_SIZE} per statement.
+   * @param ids              ids identifying the rows to remove
+   * @param paramName        name of the query parameter holding the ids (e.g. taskIds, stageIds)
+   * @param entityName       name of the entity being removed, used only for logging
+   * @param beforeDateMillis user-supplied cutoff (remove all entities created before it);
+   *                         used only for logging
+   * @param entityQuery      name of the NamedQuery that performs the removal
+   * @param type             entity class used to type the query result
+   * @return                 the number of rows removed
+   */
+  @Transactional
+  protected <T> int cleanTableByIds(Set<Long> ids, String paramName, String entityName, Long beforeDateMillis,
+                                  String entityQuery, Class<T> type) {
+    LOG.info(String.format("Deleting %s entities before date %s", entityName, new Date(beforeDateMillis)));
+    EntityManager entityManager = entityManagerProvider.get();
+    int affectedRows = 0;
+    // Batch delete
+    TypedQuery<T> query = entityManager.createNamedQuery(entityQuery, type);
+    if (ids != null && !ids.isEmpty()) {
+      // copy the id set once so every batch slices the same stable list
+      List<Long> idList = new ArrayList<>(ids);
+      for (int i = 0; i < idList.size(); i += BATCH_SIZE) {
+        int endRow = Math.min(i + BATCH_SIZE, idList.size());
+        List<Long> idsSubList = idList.subList(i, endRow);
+        LOG.info("Deleting " + entityName + " entity batch with ids: " +
+                idsSubList.get(0) + " - " + idsSubList.get(idsSubList.size() - 1));
+        query.setParameter(paramName, idsSubList);
+        affectedRows += query.executeUpdate();
+      }
+    }
+
+    return affectedRows;
+  }
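+
+  // Illustrative call (mirroring the usage in cleanup() below): deleting execution commands
+  // for a set of task ids issues DELETE ... WHERE taskId IN (...) once per batch:
+  //   cleanTableByIds(taskIds, "taskIds", "ExecutionCommand", policy.getToDateInMillis(),
+  //       "ExecutionCommandEntity.removeByTaskIds", ExecutionCommandEntity.class);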
+
+  /**
+   * Removes table rows matching the passed (requestId, stageId) pairs. Each pair needs
+   * its own DELETE statement, so batching here bounds how many pairs are logged and
+   * processed per chunk rather than collapsing them into one query.
+   * @param ids              request/stage id pairs identifying the rows to remove
+   * @param paramNames       names of the two query parameters, stage id first, then request id
+   * @param entityName       name of the entity being removed, used only for logging
+   * @param beforeDateMillis user-supplied cutoff (remove all entities created before it);
+   *                         used only for logging
+   * @param entityQuery      name of the NamedQuery that performs the removal
+   * @param type             entity class used to type the query result
+   * @return                 the number of rows removed
+   */
+  @Transactional
+  protected <T> int cleanTableByStageEntityPK(List<StageEntityPK> ids, LinkedList<String> paramNames, String entityName, Long beforeDateMillis,
+                                  String entityQuery, Class<T> type) {
+    LOG.info(String.format("Deleting %s entities before date %s", entityName, new Date(beforeDateMillis)));
+    EntityManager entityManager = entityManagerProvider.get();
+    int affectedRows = 0;
+    // Batch delete
+    TypedQuery<T> query = entityManager.createNamedQuery(entityQuery, type);
+    if (ids != null && !ids.isEmpty()) {
+      for (int i = 0; i < ids.size(); i += BATCH_SIZE) {
+        int endRow = Math.min(i + BATCH_SIZE, ids.size());
+        List<StageEntityPK> idsSubList = ids.subList(i, endRow);
+        LOG.info("Deleting " + entityName + " entity batch with request/stage ids: " +
+                idsSubList.get(0) + " - " + idsSubList.get(idsSubList.size() - 1));
+        for (StageEntityPK requestStageIds : idsSubList) {
+          query.setParameter(paramNames.get(0), requestStageIds.getStageId());
+          query.setParameter(paramNames.get(1), requestStageIds.getRequestId());
+          affectedRows += query.executeUpdate();
+        }
+      }
+    }
+
+    return affectedRows;
+  }
+
+  @Transactional
+  @Override
+  public long cleanup(TimeBasedCleanupPolicy policy) {
+    long affectedRows = 0;
+    Long clusterId = null;
+    try {
+      clusterId = m_clusters.get().getCluster(policy.getClusterName()).getClusterId();
+      // find request and stage ids created before the user-supplied date.
+      List<StageEntityPK> requestStageIds = findRequestAndStageIdsInClusterBeforeDate(clusterId, policy.getToDateInMillis());
+
+      // find request ids in the Upgrade table and exclude them from the set collected
+      // above; upgrade-related requests must not be touched
+      Set<Long> requestIdsFromUpgrade = Sets.newHashSet(findAllRequestIdsFromUpgrade());
+      Iterator<StageEntityPK> requestStageIdsIterator = requestStageIds.iterator();
+      while (requestStageIdsIterator.hasNext()) {
+        StageEntityPK nextRequestStageIds = requestStageIdsIterator.next();
+        if (requestIdsFromUpgrade.contains(nextRequestStageIds.getRequestId())) {
+          requestStageIdsIterator.remove();
+        }
+      }
+
+      Set<Long> requestIds = new HashSet<>();
+      for (StageEntityPK ids : requestStageIds) {
+        requestIds.add(ids.getRequestId());
+      }
+
+      // find task ids using request stage ids
+      Set<Long> taskIds = Sets.newHashSet(hostRoleCommandDAO.findTaskIdsByRequestStageIds(requestStageIds));
+      LinkedList<String> params = new LinkedList<>();
+      params.add("stageId");
+      params.add("requestId");
+
+      // find host task ids, to find related host requests and also to remove needed host tasks
+      List<Long> hostTaskIds = new ArrayList<>();
+      if (taskIds != null && !taskIds.isEmpty()) {
+        hostTaskIds = topologyLogicalTaskDAO.findHostTaskIdsByPhysicalTaskIds(Lists.newArrayList(taskIds));
+      }
+
+      // find host request ids by host task ids to remove later needed host requests
+      List<Long> hostRequestIds = new ArrayList<>();
+      if (!hostTaskIds.isEmpty()) {
+        hostRequestIds = topologyHostTaskDAO.findHostRequestIdsByHostTaskIds(hostTaskIds);
+      }
+
+      List<Long> topologyRequestIds = new ArrayList<>();
+      if (!hostRequestIds.isEmpty()) {
+        topologyRequestIds = topologyLogicalRequestDAO.findRequestIdsByIds(hostRequestIds);
+      }
+
+      // remove entities child-first, following their relations via task, stage and request ids
+      affectedRows += cleanTableByIds(taskIds, "taskIds", "ExecutionCommand", policy.getToDateInMillis(),
+              "ExecutionCommandEntity.removeByTaskIds", ExecutionCommandEntity.class);
+      affectedRows += cleanTableByIds(taskIds, "taskIds", "TopologyLogicalTask", policy.getToDateInMillis(),
+              "TopologyLogicalTaskEntity.removeByPhysicalTaskIds", TopologyLogicalTaskEntity.class);
+      affectedRows += cleanTableByIds(Sets.newHashSet(hostTaskIds), "hostTaskIds", "TopologyHostTask", policy.getToDateInMillis(),
+              "TopologyHostTaskEntity.removeByTaskIds", TopologyHostTaskEntity.class);
+      affectedRows += cleanTableByIds(Sets.newHashSet(hostRequestIds), "hostRequestIds", "TopologyHostRequest", policy.getToDateInMillis(),
+              "TopologyHostRequestEntity.removeByIds", TopologyHostRequestEntity.class);
+      for (Long topologyRequestId : topologyRequestIds) {
+        topologyRequestDAO.removeByPK(topologyRequestId);
+      }
+      affectedRows += cleanTableByIds(taskIds, "taskIds", "HostRoleCommand", policy.getToDateInMillis(),
+              "HostRoleCommandEntity.removeByTaskIds", HostRoleCommandEntity.class);
+      affectedRows += cleanTableByStageEntityPK(requestStageIds, params, "RoleSuccessCriteria", policy.getToDateInMillis(),
+              "RoleSuccessCriteriaEntity.removeByRequestStageIds", RoleSuccessCriteriaEntity.class);
+      affectedRows += cleanTableByStageEntityPK(requestStageIds, params, "Stage", policy.getToDateInMillis(),
+              "StageEntity.removeByRequestStageIds", StageEntity.class);
+      affectedRows += cleanTableByIds(requestIds, "requestIds", "RequestResourceFilter", policy.getToDateInMillis(),
+              "RequestResourceFilterEntity.removeByRequestIds", RequestResourceFilterEntity.class);
+      affectedRows += cleanTableByIds(requestIds, "requestIds", "RequestOperationLevel", policy.getToDateInMillis(),
+              "RequestOperationLevelEntity.removeByRequestIds", RequestOperationLevelEntity.class);
+      affectedRows += cleanTableByIds(requestIds, "requestIds", "Request", policy.getToDateInMillis(),
+              "RequestEntity.removeByRequestIds", RequestEntity.class);
+
+    } catch (AmbariException e) {
+      LOG.error("Error while looking up cluster with name: {}", policy.getClusterName(), e);
+      throw new IllegalStateException(e);
+    }
+
+    return affectedRows;
+  }
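+
+  // Illustrative usage (not part of this patch; assumes TimeBasedCleanupPolicy has a
+  // (clusterName, toDateInMillis) constructor matching the getters used above):
+  //   long purged = requestDAO.cleanup(new TimeBasedCleanupPolicy("c1", beforeMillis));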
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
index 987e44f..92f1d09 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
@@ -199,4 +199,28 @@
     return daoUtils.selectList(query);
   }
 
+  /**
+   * Gets a specific version for a component
+   * @param clusterId     the cluster id
+   * @param serviceName   the service name
+   * @param componentName the component name
+   * @param version       the component version to find
+   * @return the version entity, or {@code null} if not found
+   */
+  @RequiresSession
+  public ServiceComponentVersionEntity findVersion(long clusterId, String serviceName,
+      String componentName, String version) {
+
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<ServiceComponentVersionEntity> query = entityManager.createNamedQuery(
+        "ServiceComponentVersionEntity.findByComponentAndVersion", ServiceComponentVersionEntity.class);
+
+    query.setParameter("clusterId", clusterId);
+    query.setParameter("serviceName", serviceName);
+    query.setParameter("componentName", componentName);
+    query.setParameter("repoVersion", version);
+
+    return daoUtils.selectSingle(query);
+  }
+
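+  // Example (hypothetical values): findVersion(1L, "HDFS", "NAMENODE", "2.5.0.0-1234")
+  // returns the recorded version row for that component, or null if none matches.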
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyHostTaskDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyHostTaskDAO.java
index 02532db..1b18ffe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyHostTaskDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyHostTaskDAO.java
@@ -54,6 +54,17 @@
   }
 
   @RequiresSession
+  public List<Long> findHostRequestIdsByHostTaskIds(List<Long> hostTaskIds) {
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<Long> topologyHostTaskQuery =
+            entityManager.createNamedQuery("TopologyHostTaskEntity.findHostRequestIdsByHostTaskIds", Long.class);
+
+    topologyHostTaskQuery.setParameter("hostTaskIds", hostTaskIds);
+
+    return daoUtils.selectList(topologyHostTaskQuery);
+  }
+
+  @RequiresSession
   public List<TopologyHostTaskEntity> findAll() {
     return daoUtils.selectAll(entityManagerProvider.get(), TopologyHostTaskEntity.class);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAO.java
index e917dc2..ce1131a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAO.java
@@ -20,6 +20,7 @@
 import java.util.List;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
 
 import org.apache.ambari.server.orm.RequiresSession;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
@@ -61,4 +62,15 @@
   public void remove(TopologyLogicalRequestEntity requestEntity) {
     entityManagerProvider.get().remove(requestEntity);
   }
+
+  @RequiresSession
+  public List<Long> findRequestIdsByIds(List<Long> ids) {
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<Long> topologyLogicalRequestQuery =
+            entityManager.createNamedQuery("TopologyLogicalRequestEntity.findRequestIds", Long.class);
+
+    topologyLogicalRequestQuery.setParameter("ids", ids);
+
+    return daoUtils.selectList(topologyLogicalRequestQuery);
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalTaskDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalTaskDAO.java
index 35f47a7..780a3ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalTaskDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/TopologyLogicalTaskDAO.java
@@ -20,6 +20,7 @@
 import java.util.List;
 
 import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
 
 import org.apache.ambari.server.orm.RequiresSession;
 import org.apache.ambari.server.orm.entities.TopologyLogicalTaskEntity;
@@ -43,6 +44,17 @@
   }
 
   @RequiresSession
+  public List<Long> findHostTaskIdsByPhysicalTaskIds(List<Long> physicalTaskIds) {
+    EntityManager entityManager = entityManagerProvider.get();
+    TypedQuery<Long> topologyHostTaskQuery =
+            entityManager.createNamedQuery("TopologyLogicalTaskEntity.findHostTaskIdsByPhysicalTaskIds", Long.class);
+
+    topologyHostTaskQuery.setParameter("physicalTaskIds", physicalTaskIds);
+
+    return daoUtils.selectList(topologyHostTaskQuery);
+  }
+
+  @RequiresSession
   public List<TopologyLogicalTaskEntity> findAll() {
     return daoUtils.selectAll(entityManagerProvider.get(), TopologyLogicalTaskEntity.class);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UpgradeDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UpgradeDAO.java
index 4666edf..716f5b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UpgradeDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UpgradeDAO.java
@@ -27,8 +27,6 @@
 import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
 import org.apache.ambari.server.orm.entities.UpgradeItemEntity;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.eclipse.persistence.config.HintValues;
-import org.eclipse.persistence.config.QueryHints;
 
 import com.google.inject.Inject;
 import com.google.inject.Provider;
@@ -90,10 +88,10 @@
 
   @RequiresSession
   public UpgradeEntity findUpgradeByRequestId(Long requestId) {
-    TypedQuery<UpgradeEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT p FROM UpgradeEntity p WHERE p.requestId = :requestId", UpgradeEntity.class);
+    TypedQuery<UpgradeEntity> query = entityManagerProvider.get().createNamedQuery(
+        "UpgradeEntity.findUpgradeByRequestId", UpgradeEntity.class);
+
     query.setParameter("requestId", requestId);
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
 
     return daoUtils.selectSingle(query);
   }
@@ -131,27 +129,11 @@
     TypedQuery<UpgradeGroupEntity> query = entityManagerProvider.get().createQuery(
         "SELECT p FROM UpgradeGroupEntity p WHERE p.upgradeGroupId = :groupId", UpgradeGroupEntity.class);
     query.setParameter("groupId", groupId);
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
 
     return daoUtils.selectSingle(query);
   }
 
   /**
-   * @param itemId the item id
-   * @return the upgrade item entity, or {@code null} if not found
-   */
-  @RequiresSession
-  public UpgradeItemEntity findUpgradeItem(long itemId) {
-    TypedQuery<UpgradeItemEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT p FROM UpgradeItemEntity p WHERE p.upgradeItemId = :itemId", UpgradeItemEntity.class);
-    query.setParameter("itemId", Long.valueOf(itemId));
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
-
-    return daoUtils.selectSingle(query);
-  }
-
-
-  /**
    * @param requestId the request id
    * @param stageId the stage id
    * @return the upgrade entity, or {@code null} if not found
@@ -164,8 +146,6 @@
     query.setParameter("requestId", requestId);
     query.setParameter("stageId", stageId);
 
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
-
     return daoUtils.selectSingle(query);
   }
 
@@ -184,8 +164,6 @@
     query.setParameter("clusterId", clusterId);
     query.setParameter("direction", direction);
 
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
-
     return daoUtils.selectSingle(query);
   }
 
@@ -200,8 +178,6 @@
     query.setMaxResults(1);
     query.setParameter("clusterId", clusterId);
 
-    query.setHint(QueryHints.REFRESH, HintValues.TRUE);
-
     return daoUtils.selectSingle(query);
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
index 6337487..0c3d817 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/AlertDefinitionEntity.java
@@ -538,7 +538,7 @@
    * value from {@link #getRepeatTolerance()} should be used to calculate retry
    * tolerance.
    *
-   * @param repeatToleranceEnabled
+   * @param enabled
    *          {@code true} to override the default value and use the value
    *          returned from {@link #getRepeatTolerance()}.
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintConfiguration.java
index 36dde73..cddac9b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintConfiguration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintConfiguration.java
@@ -27,28 +27,28 @@
    *
    * @param type configuration type
    */
-  public void setType(String type);
+  void setType(String type);
 
   /**
    * Get the configuration type.
    *
    * @return configuration type
    */
-  public String getType();
+  String getType();
 
   /**
    * Set the blueprint name.
    *
    * @param blueprintName  blueprint name
    */
-  public void setBlueprintName(String blueprintName);
+  void setBlueprintName(String blueprintName);
 
   /**
    * Get the blueprint name.
    *
    * @return blueprint name
    */
-  public String getBlueprintName();
+  String getBlueprintName();
 
   /**
    * Set the configuration properties.
@@ -57,21 +57,21 @@
    *
    * @param configData json representation of property map
    */
-  public void setConfigData(String configData);
+  void setConfigData(String configData);
 
   /**
    * Get the configuration properties.
    *
    * @return json representation of property map
    */
-  public String getConfigData();
+  String getConfigData();
 
   /**
    * Get the configuration attributes.
    *
    * @return json representation of attributes map
    */
-  public String getConfigAttributes();
+  String getConfigAttributes();
 
   /**
    * Set the configuration attributes.
@@ -80,5 +80,5 @@
    *
    * @param configAttributes json representation of attributes map
    */
-  public void setConfigAttributes(String configAttributes);
+  void setConfigAttributes(String configAttributes);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 876063d..1092ac5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -129,6 +129,9 @@
   @ManyToMany(mappedBy = "clusterConfigEntities")
   private Collection<ServiceConfigEntity> serviceConfigEntities;
 
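+  // Stored as a short (0/1) rather than a boolean, presumably for portability across the
+  // database dialects Ambari supports; the accessors below expose it as a boolean.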
+  @Column(name = "service_deleted", nullable = false, insertable = true, updatable = true)
+  private short serviceDeleted = 0;
+
   /**
    * Unidirectional one-to-one association to {@link StackEntity}
    */
@@ -136,6 +139,14 @@
   @JoinColumn(name = "stack_id", unique = false, nullable = false, insertable = true, updatable = true)
   private StackEntity stack;
 
+  public boolean isServiceDeleted() {
+    return serviceDeleted != 0;
+  }
+
+  public void setServiceDeleted(boolean serviceDeleted) {
+    this.serviceDeleted = (short)(serviceDeleted ? 1 : 0);
+  }
+
   public Long getConfigId() {
     return configId;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
index 85f3a25..7015709 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
@@ -26,11 +26,16 @@
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.Lob;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.OneToOne;
 import javax.persistence.Table;
 
 @Table(name = "execution_command")
 @Entity
+@NamedQueries({
+    @NamedQuery(name = "ExecutionCommandEntity.removeByTaskIds", query = "DELETE FROM ExecutionCommandEntity command WHERE command.taskId IN :taskIds")
+})
 public class ExecutionCommandEntity {
 
   @Id
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
index 12b3ce0..e2b48bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
@@ -42,6 +42,7 @@
 @TableGenerator(name = "link_id_generator", table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value", pkColumnValue = "link_id_seq", initialValue = 0)
 @NamedQueries({
     @NamedQuery(name = "ExtensionLinkEntity.findAll", query = "SELECT link FROM ExtensionLinkEntity link"),
+    @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtensionName", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName"),
     @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion"),
     @NamedQuery(name = "ExtensionLinkEntity.findByStack", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion"),
     @NamedQuery(name = "ExtensionLinkEntity.findByExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion") })
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
index 9d35e2a..0b3d8ce 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
@@ -67,7 +67,12 @@
         query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.serviceName=:serviceName AND hcs.componentName=:componentName AND hcs.hostEntity.hostName=:hostName"),
     @NamedQuery(
         name = "HostComponentStateEntity.findByIndex",
-        query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.clusterId=:clusterId AND hcs.serviceName=:serviceName AND hcs.componentName=:componentName AND hcs.hostId=:hostId") })
+        query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.clusterId=:clusterId AND hcs.serviceName=:serviceName AND hcs.componentName=:componentName AND hcs.hostId=:hostId"),
+    @NamedQuery(
+        name = "HostComponentStateEntity.findByServiceAndComponentAndNotVersion",
+        query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.serviceName=:serviceName AND hcs.componentName=:componentName AND hcs.version != :version")
+})
 public class HostComponentStateEntity {
 
   @Id
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
index fdec5f0..bfc83ca 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
@@ -60,6 +60,7 @@
     , initialValue = 1
 )
 @NamedQueries({
+    @NamedQuery(name = "HostRoleCommandEntity.findTaskIdsByRequestStageIds", query = "SELECT command.taskId FROM HostRoleCommandEntity command WHERE command.stageId = :stageId AND command.requestId = :requestId"),
     @NamedQuery(name = "HostRoleCommandEntity.findCountByCommandStatuses", query = "SELECT COUNT(command.taskId) FROM HostRoleCommandEntity command WHERE command.status IN :statuses"),
     @NamedQuery(name = "HostRoleCommandEntity.findByRequestIdAndStatuses", query="SELECT task FROM HostRoleCommandEntity task WHERE task.requestId=:requestId AND task.status IN :statuses ORDER BY task.taskId ASC"),
     @NamedQuery(name = "HostRoleCommandEntity.findTasksByStatusesOrderByIdDesc", query = "SELECT task FROM HostRoleCommandEntity task WHERE task.requestId = :requestId AND task.status IN :statuses ORDER BY task.taskId DESC"),
@@ -71,12 +72,9 @@
     @NamedQuery(name = "HostRoleCommandEntity.findByStatusBetweenStages", query = "SELECT command FROM HostRoleCommandEntity command WHERE command.requestId = :requestId AND command.stageId >= :minStageId AND command.stageId <= :maxStageId AND command.status = :status"),
     @NamedQuery(name = "HostRoleCommandEntity.updateAutoSkipExcludeRoleCommand", query = "UPDATE HostRoleCommandEntity command SET command.autoSkipOnFailure = :autoSkipOnFailure WHERE command.requestId = :requestId AND command.roleCommand <> :roleCommand"),
     @NamedQuery(name = "HostRoleCommandEntity.updateAutoSkipForRoleCommand", query = "UPDATE HostRoleCommandEntity command SET command.autoSkipOnFailure = :autoSkipOnFailure WHERE command.requestId = :requestId AND command.roleCommand = :roleCommand"),
-    @NamedQuery(
-        name = "HostRoleCommandEntity.findHostsByCommandStatus",
-        query = "SELECT DISTINCT(host.hostName) FROM HostRoleCommandEntity command, HostEntity host WHERE (command.requestId >= :iLowestRequestIdInProgress AND command.requestId <= :iHighestRequestIdInProgress) AND command.status IN :statuses AND command.hostId = host.hostId AND host.hostName IS NOT NULL"),
-    @NamedQuery(
-        name = "HostRoleCommandEntity.getBlockingHostsForRequest",
-        query = "SELECT DISTINCT(host.hostName) FROM HostRoleCommandEntity command, HostEntity host WHERE command.requestId >= :lowerRequestIdInclusive AND command.requestId < :upperRequestIdExclusive AND command.status IN :statuses AND command.isBackgroundCommand=0 AND command.hostId = host.hostId AND host.hostName IS NOT NULL")
+    @NamedQuery(name = "HostRoleCommandEntity.removeByTaskIds", query = "DELETE FROM HostRoleCommandEntity command WHERE command.taskId IN :taskIds"),
+    @NamedQuery(name = "HostRoleCommandEntity.findHostsByCommandStatus", query = "SELECT DISTINCT(host.hostName) FROM HostRoleCommandEntity command, HostEntity host WHERE (command.requestId >= :iLowestRequestIdInProgress AND command.requestId <= :iHighestRequestIdInProgress) AND command.status IN :statuses AND command.hostId = host.hostId AND host.hostName IS NOT NULL"),
+    @NamedQuery(name = "HostRoleCommandEntity.getBlockingHostsForRequest", query = "SELECT DISTINCT(host.hostName) FROM HostRoleCommandEntity command, HostEntity host WHERE command.requestId >= :lowerRequestIdInclusive AND command.requestId < :upperRequestIdExclusive AND command.status IN :statuses AND command.isBackgroundCommand=0 AND command.hostId = host.hostId AND host.hostName IS NOT NULL")
 
 })
 public class HostRoleCommandEntity {
@@ -183,6 +181,11 @@
   @Basic
   private String commandDetail;
 
+  // Optional display name shown for this command in the operations window
+  @Column(name = "ops_display_name")
+  @Basic
+  private String opsDisplayName;
+
   // When command type is CUSTOM_COMMAND or CUSTOM_ACTION this is the name
   @Column(name = "custom_command_name")
   @Basic
@@ -203,7 +206,7 @@
   private TopologyLogicalTaskEntity topologyLogicalTaskEntity;
 
   @Basic
-  @Column(name = "is_background_command", nullable = false)
+  @Column(name = "is_background", nullable = false)
   private short isBackgroundCommand = 0;
 
   public Long getTaskId() {
@@ -380,6 +383,16 @@
     this.customCommandName = customCommandName;
   }
 
+  public String getOpsDisplayName() {
+    return opsDisplayName;
+  }
+
+  public void setOpsDisplayName(String opsDisplayName) {
+    this.opsDisplayName = opsDisplayName;
+  }
+
   /**
    * Determine whether this task should hold for retry when an error occurs.
    *
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PrivilegeEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PrivilegeEntity.java
index ba39efb..92a9387 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PrivilegeEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/PrivilegeEntity.java
@@ -173,4 +173,14 @@
   public int hashCode() {
     return Objects.hash(id, permission, resource, principal);
   }
+
+  @Override
+  public String toString() {
+    return "PrivilegeEntity{" +
+        "id=" + id +
+        ", permission=" + permission +
+        ", resource=" + resource +
+        ", principal=" + principal +
+        '}';
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
index 772b151..f5d669e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
@@ -249,8 +249,8 @@
       try {
         return repositoryVersionHelperProvider.get().parseOperatingSystems(operatingSystems);
       } catch (Exception ex) {
-        // Should never happen as we validate json before storing it to DB
-        LOG.error("Could not parse operating systems json stored in database:" + operatingSystems, ex);
+        String msg = String.format("Failed to parse repository from OS/Repo information in the database: %s. Required fields: repo_name, repo_id, base_url", operatingSystems);
+        LOG.error(msg, ex);
       }
     }
     return Collections.emptyList();
@@ -349,7 +349,7 @@
    * @return the XSD name extracted from the XML.
    */
   public String getVersionXsd() {
-    return versionXml;
+    return versionXsd;
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
index f19aa72..adf6647 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestEntity.java
@@ -26,10 +26,13 @@
 import javax.persistence.Entity;
 import javax.persistence.EnumType;
 import javax.persistence.Enumerated;
+import javax.persistence.FetchType;
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.Lob;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.OneToMany;
 import javax.persistence.OneToOne;
 import javax.persistence.Table;
@@ -39,6 +42,10 @@
 
 @Table(name = "request")
 @Entity
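+// The first query below uses a JPQL constructor expression (SELECT NEW ...) to project
+// (requestId, stageId) pairs directly into RequestDAO.StageEntityPK without loading entities.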
+@NamedQueries({
+  @NamedQuery(name = "RequestEntity.findRequestStageIdsInClusterBeforeDate", query = "SELECT NEW org.apache.ambari.server.orm.dao.RequestDAO.StageEntityPK(request.requestId, stage.stageId) FROM RequestEntity request JOIN StageEntity stage ON request.requestId = stage.requestId WHERE request.clusterId = :clusterId AND request.createTime <= :beforeDate"),
+  @NamedQuery(name = "RequestEntity.removeByRequestIds", query = "DELETE FROM RequestEntity request WHERE request.requestId IN :requestIds")
+})
 public class RequestEntity {
 
   @Column(name = "request_id")
@@ -61,6 +68,16 @@
   @Basic
   private String commandName;
 
+  /**
+   * On large clusters, this value can be in the 10,000's of kilobytes. During
+   * an upgrade, all stages are loaded in memory for every request, which can
+   * lead to an OOM. As a result, lazy load this since it's barely ever
+   * requested or used.
+   */
+  @Column(name = "cluster_host_info")
+  @Basic(fetch = FetchType.LAZY)
+  private byte[] clusterHostInfo;
+
   @Column(name = "inputs")
   @Lob
   private byte[] inputs = new byte[0];
@@ -145,6 +162,14 @@
     this.stages = stages;
   }
 
+  public String getClusterHostInfo() {
+    return clusterHostInfo == null ? "{}" : new String(clusterHostInfo);
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo.getBytes();
+  }
+
   public Long getCreateTime() {
     return createTime;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestOperationLevelEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestOperationLevelEntity.java
index ff14e3a..a7cd0d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestOperationLevelEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestOperationLevelEntity.java
@@ -40,7 +40,9 @@
 @NamedQueries({
     @NamedQuery(name = "requestOperationLevelByHostId", query =
         "SELECT requestOperationLevel FROM RequestOperationLevelEntity requestOperationLevel " +
-            "WHERE requestOperationLevel.hostId=:hostId")
+            "WHERE requestOperationLevel.hostId=:hostId"),
+    @NamedQuery(name = "RequestOperationLevelEntity.removeByRequestIds",
+        query = "DELETE FROM RequestOperationLevelEntity requestOperationLevel WHERE requestOperationLevel.requestId IN :requestIds")
 })
 public class RequestOperationLevelEntity {
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestResourceFilterEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestResourceFilterEntity.java
index 8ee41d2..9597db1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestResourceFilterEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RequestResourceFilterEntity.java
@@ -26,6 +26,8 @@
 import javax.persistence.JoinColumn;
 import javax.persistence.Lob;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
 
@@ -36,6 +38,9 @@
   , pkColumnValue = "resourcefilter_id_seq"
   , initialValue = 1
 )
+@NamedQueries({
+  @NamedQuery(name = "RequestResourceFilterEntity.removeByRequestIds", query = "DELETE FROM RequestResourceFilterEntity filter WHERE filter.requestId IN :requestIds")
+})
 public class RequestResourceFilterEntity {
 
   @Id
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
index 3386c24..66e7fd8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
@@ -26,6 +26,8 @@
 import javax.persistence.JoinColumn;
 import javax.persistence.JoinColumns;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 
 import org.apache.ambari.server.Role;
@@ -33,6 +35,9 @@
 @IdClass(org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntityPK.class)
 @Table(name = "role_success_criteria")
 @Entity
+@NamedQueries({
+  @NamedQuery(name = "RoleSuccessCriteriaEntity.removeByRequestStageIds", query = "DELETE FROM RoleSuccessCriteriaEntity criteria WHERE criteria.stageId = :stageId AND criteria.requestId = :requestId")
+})
 public class RoleSuccessCriteriaEntity {
 
   @Id
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
index 9b93517..17fd323 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
@@ -40,6 +40,7 @@
 import javax.persistence.TableGenerator;
 import javax.persistence.UniqueConstraint;
 
+import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.State;
 
 @Entity
@@ -84,6 +85,10 @@
   @Column(name = "recovery_enabled", nullable = false, insertable = true, updatable = true)
   private Integer recoveryEnabled = 0;
 
+  @Column(name = "repo_state", nullable = false, insertable = true, updatable = true)
+  @Enumerated(EnumType.STRING)
+  private RepositoryVersionState repoState = RepositoryVersionState.INIT;
+
   /**
    * Unidirectional one-to-one association to {@link StackEntity}
    */
@@ -297,4 +302,18 @@
     this.hostComponentDesiredStateEntities = hostComponentDesiredStateEntities;
   }
 
+  /**
+   * @param state the repository state for {@link #getDesiredVersion()}
+   */
+  public void setRepositoryState(RepositoryVersionState state) {
+    repoState = state;
+  }
+
+  /**
+   * @return the state of the repository for {@link #getDesiredVersion()}
+   */
+  public RepositoryVersionState getRepositoryState() {
+    return repoState;
+  }
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentVersionEntity.java
index 5085d18..f0b9660 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentVersionEntity.java
@@ -49,9 +49,22 @@
     valueColumnName = "sequence_value",
     pkColumnValue = "servicecomponent_version_id_seq",
     initialValue = 0)
-@NamedQueries({ @NamedQuery(
+@NamedQueries({
+  @NamedQuery(
     name = "ServiceComponentVersionEntity.findByComponent",
-    query = "SELECT version FROM ServiceComponentVersionEntity version WHERE version.m_serviceComponentDesiredStateEntity.clusterId = :clusterId AND version.m_serviceComponentDesiredStateEntity.serviceName = :serviceName AND version.m_serviceComponentDesiredStateEntity.componentName = :componentName") })
+    query = "SELECT version FROM ServiceComponentVersionEntity version WHERE " +
+      "version.m_serviceComponentDesiredStateEntity.clusterId = :clusterId AND " +
+      "version.m_serviceComponentDesiredStateEntity.serviceName = :serviceName AND " +
+      "version.m_serviceComponentDesiredStateEntity.componentName = :componentName"),
+  @NamedQuery(
+    name = "ServiceComponentVersionEntity.findByComponentAndVersion",
+    query = "SELECT version FROM ServiceComponentVersionEntity version WHERE " +
+        "version.m_serviceComponentDesiredStateEntity.clusterId = :clusterId AND " +
+        "version.m_serviceComponentDesiredStateEntity.serviceName = :serviceName AND " +
+        "version.m_serviceComponentDesiredStateEntity.componentName = :componentName AND " +
+        "version.m_repositoryVersion.version = :repoVersion")
+})
 public class ServiceComponentVersionEntity {
 
   @Id
@@ -66,7 +79,7 @@
   private ServiceComponentDesiredStateEntity m_serviceComponentDesiredStateEntity;
 
   @ManyToOne
-  @JoinColumn(name = "repo_version_id", referencedColumnName = "repo_version_id", nullable = false)
+  @JoinColumn(name  = "repo_version_id", referencedColumnName = "repo_version_id", nullable = false)
   private RepositoryVersionEntity m_repositoryVersion;
 
   @Column(name = "state", nullable = false, insertable = true, updatable = true)
@@ -99,6 +112,13 @@
   }
 
   /**
+   * @return the repository
+   */
+  public RepositoryVersionEntity getRepositoryVersion() {
+    return m_repositoryVersion;
+  }
+
+  /**
    * @return the id
    */
   public long getId() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
index d035729..6ee0a3b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
@@ -50,7 +50,11 @@
         query = "SELECT stage.requestId, MIN(stage.stageId) from StageEntity stage, HostRoleCommandEntity hrc WHERE hrc.status IN :statuses AND hrc.stageId = stage.stageId AND hrc.requestId = stage.requestId GROUP by stage.requestId ORDER BY stage.requestId"),
     @NamedQuery(
         name = "StageEntity.findByRequestIdAndCommandStatuses",
-        query = "SELECT stage from StageEntity stage WHERE stage.status IN :statuses AND stage.requestId = :requestId ORDER BY stage.stageId") })
+        query = "SELECT stage from StageEntity stage WHERE stage.status IN :statuses AND stage.requestId = :requestId ORDER BY stage.stageId"),
+    @NamedQuery(
+        name = "StageEntity.removeByRequestStageIds",
+        query = "DELETE FROM StageEntity stage WHERE stage.stageId = :stageId AND stage.requestId = :requestId")
+})
 public class StageEntity {
 
   @Basic
@@ -92,16 +96,6 @@
    * lead to an OOM. As a result, lazy load this since it's barely ever
    * requested or used.
    */
-  @Column(name = "cluster_host_info")
-  @Basic(fetch = FetchType.LAZY)
-  private byte[] clusterHostInfo;
-
-  /**
-   * On large clusters, this value can be in the 10,000's of kilobytes. During
-   * an upgrade, all stages are loaded in memory for every request, which can
-   * lead to an OOM. As a result, lazy load this since it's barely ever
-   * requested or used.
-   */
   @Column(name = "command_params")
   @Basic(fetch = FetchType.LAZY)
   private byte[] commandParamsStage;
@@ -183,14 +177,6 @@
     return defaultString(requestContext);
   }
 
-  public String getClusterHostInfo() {
-    return clusterHostInfo == null ? new String() : new String(clusterHostInfo);
-  }
-
-  public void setClusterHostInfo(String clusterHostInfo) {
-    this.clusterHostInfo = clusterHostInfo.getBytes();
-  }
-
   public String getCommandParamsStage() {
     return commandParamsStage == null ? new String() : new String(commandParamsStage);
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
index dc39e55..637a18b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity_.java
@@ -41,7 +41,6 @@
   public static volatile SingularAttribute<StageEntity, String> logInfo;
   public static volatile SingularAttribute<StageEntity, String> requestContext;
 
-  public static volatile SingularAttribute<StageEntity, byte[]> clusterHostInfo;
   public static volatile SingularAttribute<StageEntity, byte[]> commandParamsStage;
   public static volatile SingularAttribute<StageEntity, byte[]> hostParamsStage;
 
@@ -74,9 +73,6 @@
     mapping.put(StageResourceProvider.STAGE_CONTEXT,
         Collections.singletonList(requestContext));
 
-    mapping.put(StageResourceProvider.STAGE_CLUSTER_HOST_INFO,
-        Collections.singletonList(clusterHostInfo));
-
     mapping.put(StageResourceProvider.STAGE_COMMAND_PARAMS,
         Collections.singletonList(commandParamsStage));
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostRequestEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostRequestEntity.java
index b90e192..2700f68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostRequestEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostRequestEntity.java
@@ -25,11 +25,16 @@
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.OneToMany;
 import javax.persistence.Table;
 
 @Entity
 @Table(name = "topology_host_request")
+@NamedQueries({
+  @NamedQuery(name = "TopologyHostRequestEntity.removeByIds", query = "DELETE FROM TopologyHostRequestEntity topologyHostRequest WHERE topologyHostRequest.id IN :hostRequestIds")
+})
 public class TopologyHostRequestEntity {
   @Id
 //  @GeneratedValue(strategy = GenerationType.TABLE, generator = "topology_host_request_id_generator")
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostTaskEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostTaskEntity.java
index bba0e06..0bb3e19 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostTaskEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyHostTaskEntity.java
@@ -40,7 +40,11 @@
   pkColumnValue = "topology_host_task_id_seq", initialValue = 0)
 @NamedQueries({
   @NamedQuery(name = "TopologyHostTaskEntity.findByHostRequest",
-      query = "SELECT req FROM TopologyHostTaskEntity req WHERE req.topologyHostRequestEntity.id = :hostRequestId")
+      query = "SELECT req FROM TopologyHostTaskEntity req WHERE req.topologyHostRequestEntity.id = :hostRequestId"),
+  @NamedQuery(name = "TopologyLogicalTaskEntity.findHostRequestIdsByHostTaskIds",
+      query = "SELECT tht.hostRequestId from TopologyHostTaskEntity tht WHERE tht.id IN :hostTaskIds"),
+  @NamedQuery(name = "TopologyHostTaskEntity.removeByTaskIds",
+      query = "DELETE FROM TopologyHostTaskEntity tht WHERE tht.id IN :hostTaskIds")
 })
 public class TopologyHostTaskEntity {
   @Id
@@ -51,6 +55,9 @@
   @Column(name = "type", length = 255, nullable = false)
   private String type;
 
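+  // Read-only mirror of the FK column owned by the topologyHostRequestEntity association
+  // below; insertable/updatable = false avoids a second writable mapping of the same column.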
+  @Column(name = "host_request_id", nullable = false, insertable = false, updatable = false)
+  private Long hostRequestId;
+
   @ManyToOne
   @JoinColumn(name = "host_request_id", referencedColumnName = "id", nullable = false)
   private TopologyHostRequestEntity topologyHostRequestEntity;
@@ -67,7 +74,11 @@
   }
 
   public Long getHostRequestId() {
-    return topologyHostRequestEntity != null ? topologyHostRequestEntity.getId() : null;
+    return hostRequestId;
+  }
+
+  public void setHostRequestId(Long hostRequestId) {
+    this.hostRequestId = hostRequestId;
   }
 
   public String getType() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalRequestEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalRequestEntity.java
index 4f865f4..605a043 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalRequestEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalRequestEntity.java
@@ -24,12 +24,17 @@
 import javax.persistence.Entity;
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.OneToMany;
 import javax.persistence.OneToOne;
 import javax.persistence.Table;
 
 @Entity
 @Table(name = "topology_logical_request")
+@NamedQueries({
+  @NamedQuery(name = "TopologyLogicalRequestEntity.findRequestIds", query = "SELECT logicalrequest.topologyRequestId from TopologyLogicalRequestEntity logicalrequest WHERE logicalrequest.id IN :ids")
+})
 public class TopologyLogicalRequestEntity {
   @Id
   @Column(name = "id", nullable = false, updatable = false)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalTaskEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalTaskEntity.java
index c71d4e4..2954863 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalTaskEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyLogicalTaskEntity.java
@@ -24,6 +24,8 @@
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.OneToOne;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
@@ -33,6 +35,10 @@
 @TableGenerator(name = "topology_logical_task_id_generator", table = "ambari_sequences",
   pkColumnName = "sequence_name", valueColumnName = "sequence_value",
   pkColumnValue = "topology_logical_task_id_seq", initialValue = 0)
+@NamedQueries({
+  @NamedQuery(name = "TopologyLogicalTaskEntity.findHostTaskIdsByPhysicalTaskIds", query = "SELECT logicaltask.hostTaskId from TopologyLogicalTaskEntity logicaltask WHERE logicaltask.physicalTaskId IN :physicalTaskIds"),
+  @NamedQuery(name = "TopologyLogicalTaskEntity.removeByPhysicalTaskIds", query = "DELETE FROM TopologyLogicalTaskEntity logicaltask WHERE logicaltask.physicalTaskId IN :taskIds")
+})
 public class TopologyLogicalTaskEntity {
   @Id
   @GeneratedValue(strategy = GenerationType.TABLE, generator = "topology_logical_task_id_generator")
@@ -42,12 +48,18 @@
   @Column(name = "component", length = 255)
   private String componentName;
 
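+  // Read-only mirrors of the FK columns owned by the associations below; because they are
+  // non-insertable, the setters further down only affect the in-memory values.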
+  @Column(name = "host_task_id", nullable = false, insertable = false, updatable = false)
+  private Long hostTaskId;
+
+  @Column(name = "physical_task_id", nullable = false, insertable = false, updatable = false)
+  private Long physicalTaskId;
+
   @ManyToOne
   @JoinColumn(name = "host_task_id", referencedColumnName = "id", nullable = false)
   private TopologyHostTaskEntity topologyHostTaskEntity;
 
   @OneToOne
-  @JoinColumn(name = "physical_task_id", referencedColumnName = "task_id")
+  @JoinColumn(name = "physical_task_id", referencedColumnName = "task_id", nullable = false)
   private HostRoleCommandEntity hostRoleCommandEntity;
 
   public Long getId() {
@@ -58,14 +70,22 @@
     this.id = id;
   }
 
-  public Long getHostTaskId() {
-    return topologyHostTaskEntity != null ? topologyHostTaskEntity.getId() : null;
-  }
-
   public Long getPhysicalTaskId() {
     return hostRoleCommandEntity != null ? hostRoleCommandEntity.getTaskId() : null;
   }
 
+  public void setPhysicalTaskId(Long physicalTaskId) {
+    this.physicalTaskId = physicalTaskId;
+  }
+
+  public void setHostTaskId(Long hostTaskId) {
+    this.hostTaskId = hostTaskId;
+  }
+
+  public Long getHostTaskId() {
+    return hostTaskId;
+  }
+
   public String getComponentName() {
     return componentName;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
index 89574bc..e5e2de3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
@@ -59,10 +59,14 @@
       query = "SELECT u FROM UpgradeEntity u WHERE u.clusterId = :clusterId"),
   @NamedQuery(name = "UpgradeEntity.findUpgrade",
       query = "SELECT u FROM UpgradeEntity u WHERE u.upgradeId = :upgradeId"),
+  @NamedQuery(name = "UpgradeEntity.findUpgradeByRequestId",
+      query = "SELECT u FROM UpgradeEntity u WHERE u.requestId = :requestId"),  
   @NamedQuery(name = "UpgradeEntity.findLatestForClusterInDirection",
       query = "SELECT u FROM UpgradeEntity u JOIN RequestEntity r ON u.requestId = r.requestId WHERE u.clusterId = :clusterId AND u.direction = :direction ORDER BY r.startTime DESC, u.upgradeId DESC"),
   @NamedQuery(name = "UpgradeEntity.findLatestForCluster",
       query = "SELECT u FROM UpgradeEntity u JOIN RequestEntity r ON u.requestId = r.requestId WHERE u.clusterId = :clusterId ORDER BY r.startTime DESC"),
+  @NamedQuery(name = "UpgradeEntity.findAllRequestIds",
+      query = "SELECT upgrade.requestId FROM UpgradeEntity upgrade")
 })
 public class UpgradeEntity {
 
@@ -174,9 +178,6 @@
     return requestId;
   }
 
-  /**
-   * @param id the request id
-   */
   public void setRequestEntity(RequestEntity requestEntity) {
     this.requestEntity = requestEntity;
     requestId = requestEntity.getRequestId();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
index 4830e3b..53a1925 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java
@@ -111,7 +111,7 @@
   }
 
   /**
-   * @param text the item text
+   * @param title the item text
    */
   public void setTitle(String title) {
     groupTitle = title;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeItemEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeItemEntity.java
index 560970a..35ea769 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeItemEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeItemEntity.java
@@ -27,6 +27,8 @@
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
 import javax.persistence.ManyToOne;
+import javax.persistence.NamedQueries;
+import javax.persistence.NamedQuery;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
 
@@ -48,6 +50,9 @@
     pkColumnValue = "upgrade_item_id_seq",
     initialValue = 0,
     allocationSize = 1000)
+@NamedQueries({
+  @NamedQuery(name = "UpgradeItemEntity.findAllStageIds", query = "SELECT upgradeItem.stageId FROM UpgradeItemEntity upgradeItem")
+})
 public class UpgradeItemEntity {
 
   @Id
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java
index 576ca97..432191e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java
@@ -42,6 +42,7 @@
 import javax.persistence.TemporalType;
 import javax.persistence.UniqueConstraint;
 
+import org.apache.ambari.server.security.authorization.UserName;
 import org.apache.ambari.server.security.authorization.UserType;
 
 @Table(name = "users", uniqueConstraints = {@UniqueConstraint(columnNames = {"user_name", "user_type"})})
@@ -117,8 +118,8 @@
     return userName;
   }
 
-  public void setUserName(String userName) {
-    this.userName = userName;
+  public void setUserName(UserName userName) {
+    this.userName = userName.toString();
   }
 
   public Boolean getLdapUser() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java
index 6ccec36..7d9e5f0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java
@@ -894,4 +894,13 @@
   public static String getViewName(String name, String version) {
     return name + "{" + version + "}";
   }
+
+  @Override
+  public String toString() {
+    return "ViewEntity{" +
+        "name='" + name + '\'' +
+        ", label='" + label + '\'' +
+        ", description='" + description + '\'' +
+        '}';
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java
index 95ec52d..e1cdd8a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewInstanceEntity.java
@@ -1071,4 +1071,13 @@
     }
   }
 
+  @Override
+  public String toString() {
+    return "ViewInstanceEntity{" +
+        "viewInstanceId=" + viewInstanceId +
+        ", viewName='" + viewName + '\'' +
+        ", name='" + name + '\'' +
+        ", label='" + label + '\'' +
+        '}';
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
index 27cb8e2..7765a12 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewURLEntity.java
@@ -80,7 +80,6 @@
 
   /**
    * Set the URL suffix
-   * @param URL suffix
      */
   public void setUrlSuffix(String urlSuffix) {
     this.urlSuffix = urlSuffix;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
index c2778d3..7f74bb0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/DbmsHelper.java
@@ -66,7 +66,6 @@
    *
    * @param indexName
    * @param tableName
-   * @param columnNames
    * @return
    */
   String getDropIndexStatement(String indexName, String tableName);
@@ -128,6 +127,27 @@
   String getSetNullableStatement(String tableName, DBAccessor.DBColumnInfo columnInfo, boolean nullable);
 
   /**
+   * Gets the {@code UPDATE} statement that copies a column from {@code sourceTable} into
+   * {@code targetTable}, matching rows on the key columns {@code sourceIDColumnName} and {@code targetIDColumnName}.
+   *
+   * @param sourceTable
+   *          the source table name
+   * @param sourceColumnName
+   *          the source column name
+   * @param sourceIDColumnName
+   *          the source key id column used to match the right rows in {@code targetTable}
+   * @param targetTable
+   *          the destination table name
+   * @param targetColumnName
+   *          the destination column name
+   * @param targetIDColumnName
+   *          the destination key id column name which should match {@code sourceIDColumnName}
+   * @return the dialect-specific {@code UPDATE} statement
+   */
+  String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName, String sourceIDColumnName,
+                                              String targetTable, String targetColumnName, String targetIDColumnName);
+
+  /**
   * Gets whether the database platform supports adding constraints after the
   * {@code NULL} constraint. Some databases, such as Oracle, don't allow this.
   * Unfortunately, EclipseLink hard codes the order of constraints.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
index f60c138..7e3092d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
@@ -78,6 +78,14 @@
     return stringBuilder.toString();
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+    throw new UnsupportedOperationException("Column copy is not supported for generic DB");
+  }
+
   public StringBuilder writeAlterTableClause(StringBuilder builder, String tableName) {
     builder.append("ALTER TABLE ").append(tableName).append(" ");
     return builder;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
index c693be5..0daea72 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/MySqlHelper.java
@@ -93,4 +93,16 @@
     }
     return defaultWriter;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    return String.format("UPDATE %1$s AS a INNER JOIN %2$s AS b ON a.%5$s = b.%6$s SET a.%3$s = b.%4$s",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
index b5955b4..73356d1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/OracleHelper.java
@@ -79,4 +79,16 @@
   public boolean isConstraintSupportedAfterNullability() {
     return false;
   }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    // the sub-query must return a single value; ROWNUM is a safeguard against multiple matches
+    return String.format("UPDATE %1$s a SET (a.%3$s) = (SELECT b.%4$s FROM %2$s b WHERE b.%6$s = a.%5$s and ROWNUM < 2)",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
index 2237f86..37c1184 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/PostgresHelper.java
@@ -44,6 +44,18 @@
     return builder;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCopyColumnToAnotherTableStatement(String sourceTable, String sourceColumnName,
+         String sourceIDColumnName, String targetTable, String targetColumnName, String targetIDColumnName) {
+
+    return String.format("UPDATE %1$s AS a SET %3$s = b.%4$s FROM %2$s AS b WHERE a.%5$s = b.%6$s",
+      targetTable, sourceTable, targetColumnName, sourceColumnName, targetIDColumnName, sourceIDColumnName);
+  }
+
   @Override
   public StringBuilder writeSetNullableString(StringBuilder builder,
       String tableName, DBAccessor.DBColumnInfo columnInfo, boolean nullable) {
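
To make the three dialects above concrete, here is an illustrative call with hypothetical table and column names; the rendered statements in the comments follow directly from the format strings above:

    // Hypothetical names; only the SQL shapes are taken from the helpers above.
    String sql = dbmsHelper.getCopyColumnToAnotherTableStatement(
        "source_tbl", "src_col", "src_id",    // source table, column, key
        "target_tbl", "dst_col", "dst_id");   // target table, column, key

    // MySqlHelper:
    //   UPDATE target_tbl AS a INNER JOIN source_tbl AS b ON a.dst_id = b.src_id SET a.dst_col = b.src_col
    // PostgresHelper:
    //   UPDATE target_tbl AS a SET dst_col = b.src_col FROM source_tbl AS b WHERE a.dst_id = b.src_id
    // OracleHelper (ROWNUM < 2 caps the correlated sub-query at one row):
    //   UPDATE target_tbl a SET (a.dst_col) = (SELECT b.src_col FROM source_tbl b WHERE b.src_id = a.dst_id and ROWNUM < 2)
    // GenericDbmsHelper: throws UnsupportedOperationException.
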
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java
index ff83729..6180c53 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionJob.java
@@ -23,12 +23,12 @@
  * Type of Quartz Job that can be executed by the {@code ExecutionScheduleManager}
  */
 public interface ExecutionJob extends Job {
-  public static final String NEXT_EXECUTION_JOB_NAME_KEY = "ExecutionJob.Name";
-  public static final String NEXT_EXECUTION_JOB_GROUP_KEY = "ExecutionJob.Group";
-  public static final String NEXT_EXECUTION_SEPARATION_SECONDS =
+  String NEXT_EXECUTION_JOB_NAME_KEY = "ExecutionJob.Name";
+  String NEXT_EXECUTION_JOB_GROUP_KEY = "ExecutionJob.Group";
+  String NEXT_EXECUTION_SEPARATION_SECONDS =
     "ExecutionJob.SeparationMinutes";
-  public static final String LINEAR_EXECUTION_JOB_GROUP =
+  String LINEAR_EXECUTION_JOB_GROUP =
     "LinearExecutionJobs";
-  public static final String LINEAR_EXECUTION_TRIGGER_GROUP =
+  String LINEAR_EXECUTION_TRIGGER_GROUP =
     "LinearExecutionTriggers";
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java
index bb0a6c0..c184f08 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionScheduler.java
@@ -30,27 +30,27 @@
    * Initialize and start the scheduler to accept jobs.
    * @throws AmbariException
    */
-  public void startScheduler(Integer delay) throws AmbariException;
+  void startScheduler(Integer delay) throws AmbariException;
 
   /**
    * Shutdown the scheduler threads and do not accept any more jobs.
    * @throws AmbariException
    */
-  public void stopScheduler() throws AmbariException;
+  void stopScheduler() throws AmbariException;
 
   /**
    * Add a trigger to the execution scheduler
    * @param trigger
    * @throws SchedulerException
    */
-  public void scheduleJob(Trigger trigger) throws SchedulerException;
+  void scheduleJob(Trigger trigger) throws SchedulerException;
 
   /**
    * Persist job data
    * @param job
    * @throws SchedulerException
    */
-  public void addJob(JobDetail job) throws SchedulerException;
+  void addJob(JobDetail job) throws SchedulerException;
 
 
   /**
@@ -58,14 +58,14 @@
    * @param jobKey
    * @throws SchedulerException
    */
-  public void deleteJob(JobKey jobKey) throws SchedulerException;
+  void deleteJob(JobKey jobKey) throws SchedulerException;
 
   /**
    * Get details for a job from scheduler.
    * @param jobKey
    * @return
    */
-  public JobDetail getJobDetail(JobKey jobKey) throws SchedulerException;
+  JobDetail getJobDetail(JobKey jobKey) throws SchedulerException;
 
   /**
    * Get all triggers created for a job.
@@ -73,12 +73,12 @@
    * @return
    * @throws SchedulerException
    */
-  public List<? extends Trigger> getTriggersForJob(JobKey jobKey)
+  List<? extends Trigger> getTriggersForJob(JobKey jobKey)
     throws SchedulerException;
 
   /**
    * Check whether the scheduler is already running.
    * @return
    */
-  public boolean isSchedulerStarted() throws SchedulerException;
+  boolean isSchedulerStarted() throws SchedulerException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityHelper.java
index d4fdcf5..cb7413f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityHelper.java
@@ -32,12 +32,12 @@
    *
    * @return the current user.
    */
-  public String getCurrentUserName();
+  String getCurrentUserName();
 
   /**
    * Get the granted authorities for the current user.
    *
    * @return the granted authorities
    */
-  public Collection<? extends GrantedAuthority> getCurrentAuthorities();
+  Collection<? extends GrantedAuthority> getCurrentAuthorities();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
index ca7cd31..4c0963d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.ambari.server.security.authorization;
 
-import java.security.Principal;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -40,7 +39,6 @@
 import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
-import org.springframework.security.core.context.SecurityContextHolder;
 
 import com.google.inject.Inject;
 
@@ -77,6 +75,13 @@
   public Authentication authenticate(Authentication authentication) throws AuthenticationException {
       if(isPamEnabled()){
         PAM pam;
+        String userName = String.valueOf(authentication.getPrincipal());
+        UserEntity existingUser = userDAO.findUserByName(userName);
+        if ((existingUser != null) && (existingUser.getUserType() != UserType.PAM)) {
+          String errorMsg = String.format("%s user exists with the username %s. Cannot authenticate via PAM", existingUser.getUserType(), userName);
+          LOG.error(errorMsg);
+          return null;
+        }
         try{
           //Set PAM configuration file (found under /etc/pam.d)
           String pamConfig = configuration.getPamConfigurationFile();
@@ -124,18 +129,10 @@
               users.getUserAuthorities(userName, UserType.PAM);
 
           final User user = users.getUser(userName, UserType.PAM);
-
-          Principal principal = new Principal() {
-            @Override
-            public String getName() {
-              return user.getUserName();
-            }
-          };
-
-          UsernamePasswordAuthenticationToken token = new UsernamePasswordAuthenticationToken(principal, null, userAuthorities);
-          SecurityContextHolder.getContext().setAuthentication(token);
-          return token;
-
+
+          Authentication authToken = new AmbariUserAuthentication(passwd, user, userAuthorities);
+          authToken.setAuthenticated(true);
+          return authToken;
         } catch (PAMException ex) {
           LOG.error("Unable to sign in. Invalid username/password combination - " + ex.getMessage());
           Throwable t = ex.getCause();
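
A note on the guard added above: returning null from an AuthenticationProvider is the Spring Security idiom for "cannot decide", so ProviderManager falls through to the next configured provider instead of failing the request outright. A minimal sketch of that contract (provider instances and credentials are illustrative):

    import java.util.Arrays;

    import org.springframework.security.authentication.AuthenticationProvider;
    import org.springframework.security.authentication.ProviderManager;
    import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
    import org.springframework.security.core.Authentication;

    public class PamFallbackSketch {
      public static Authentication tryProviders(AuthenticationProvider pamProvider,
                                                AuthenticationProvider fallbackProvider) {
        ProviderManager manager = new ProviderManager(Arrays.asList(pamProvider, fallbackProvider));
        // If pamProvider returns null (e.g. the username belongs to a LOCAL or
        // LDAP user), ProviderManager silently consults fallbackProvider.
        return manager.authenticate(new UsernamePasswordAuthenticationToken("jdoe", "secret"));
      }
    }
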
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
index e06d8f2..4435e98 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
@@ -148,8 +148,8 @@
   /**
    * Returns the LDAP filter to search users by.
   * @param useAlternateUserSearchFilter if true then return LDAP filter that expects user name in
-   *                                  User Principal Name format to filter users constructed from {@value org.apache.ambari.server.configuration.Configuration#LDAP_ALT_USER_SEARCH_FILTER_KEY}.
-   *                                  Otherwise the filter is constructed from {@value org.apache.ambari.server.configuration.Configuration#LDAP_USER_SEARCH_FILTER_KEY}
+   *                                  User Principal Name format to filter users constructed from {@link org.apache.ambari.server.configuration.Configuration#LDAP_ALT_USER_SEARCH_FILTER}.
+   *                                  Otherwise the filter is constructed from {@link org.apache.ambari.server.configuration.Configuration#LDAP_USER_SEARCH_FILTER}
    * @return the LDAP filter string
    */
   public String getUserSearchFilter(boolean useAlternateUserSearchFilter) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/ResourceType.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/ResourceType.java
index 40a10e9..9b94437 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/ResourceType.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/ResourceType.java
@@ -37,7 +37,7 @@
    *
    * @param id the ID value for this ResourceType
    */
-  private ResourceType(int id) {
+  ResourceType(int id) {
     this.id = id;
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/UserName.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/UserName.java
new file mode 100644
index 0000000..605183c
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/UserName.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.security.authorization;
+
+import java.util.Arrays;
+
+import org.apache.commons.lang.StringUtils;
+
+/**
+ * Represents an Ambari user name
+ */
+public class UserName {
+  private static final char[] FORBIDDEN_CHARS = {'<', '>', '&', '|', '\\', '`'};
+  private final String userName;
+
+  /**
+   * Creates a UserName from the given string
+   */
+  public static UserName fromString(String userName) {
+    return new UserName(validated(userName));
+  }
+
+  private static String validated(String userName) {
+    if (StringUtils.isBlank(userName)) {
+      throw new IllegalArgumentException("Username cannot be empty");
+    }
+    rejectIfContainsAnyOf(userName, FORBIDDEN_CHARS);
+    return userName;
+  }
+
+  private static void rejectIfContainsAnyOf(String name, char[] forbiddenChars) {
+    for (char each : forbiddenChars) {
+      if (name.contains(Character.toString(each))) {
+        throw new IllegalArgumentException("Invalid username: " + name + " Avoid characters " + Arrays.toString(forbiddenChars));
+      }
+    }
+  }
+
+  private UserName(String userName) {
+    this.userName = userName;
+  }
+
+  @Override
+  public String toString() {
+    return userName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    UserName userName1 = (UserName) o;
+    return userName.equals(userName1.userName);
+  }
+
+  @Override
+  public int hashCode() {
+    return userName.hashCode();
+  }
+}
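
A short usage sketch for the new value object (values are illustrative; the entity wiring mirrors the Users changes below):

    // Valid input flows straight through to the entity:
    UserName name = UserName.fromString("jdoe");
    UserEntity entity = new UserEntity();
    entity.setUserName(name);               // persisted via name.toString()

    // Invalid input fails fast with IllegalArgumentException:
    //   UserName.fromString("")            -> "Username cannot be empty"
    //   UserName.fromString("jdoe|admin")  -> contains forbidden character '|'
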
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 4ed777b..9cdde8f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -321,7 +321,7 @@
     principalDAO.create(principalEntity);
 
     UserEntity userEntity = new UserEntity();
-    userEntity.setUserName(userName);
+    userEntity.setUserName(UserName.fromString(userName));
     if (userType == UserType.LOCAL) {
       //passwords should be stored for local users only
       userEntity.setUserPassword(passwordEncoder.encode(password));
@@ -709,7 +709,7 @@
       principalsToCreate.add(principalEntity);
 
       final UserEntity userEntity = new UserEntity();
-      userEntity.setUserName(userName);
+      userEntity.setUserName(UserName.fromString(userName));
       userEntity.setUserPassword("");
       userEntity.setPrincipal(principalEntity);
       userEntity.setLdapUser(true);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyService.java b/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyService.java
index 895b8a3..ef3779d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/encryption/MasterKeyService.java
@@ -18,6 +18,6 @@
 package org.apache.ambari.server.security.encryption;
 
 public interface MasterKeyService {
-  public char[] getMasterSecret();
-  public boolean isMasterKeyInitialized();
+  char[] getMasterSecret();
+  boolean isMasterKeyInitialized();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosConfigDataFile.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosConfigDataFile.java
index 82b9225..b2bba22 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosConfigDataFile.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosConfigDataFile.java
@@ -23,13 +23,13 @@
  * for the Kerberos configuration data files.
  */
 public interface KerberosConfigDataFile extends KerberosDataFile {
-  public static final String DATA_FILE_NAME = "configs.dat";
+  String DATA_FILE_NAME = "configs.dat";
 
-  public static final String CONFIGURATION_TYPE = "config";
-  public static final String KEY = "key";
-  public static final String VALUE = "value";
-  public static final String OPERATION = "operation";
+  String CONFIGURATION_TYPE = "config";
+  String KEY = "key";
+  String VALUE = "value";
+  String OPERATION = "operation";
 
-  public static final String OPERATION_TYPE_SET = "SET";
-  public static final String OPERATION_TYPE_REMOVE = "REMOVE";
+  String OPERATION_TYPE_SET = "SET";
+  String OPERATION_TYPE_REMOVE = "REMOVE";
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosIdentityDataFile.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosIdentityDataFile.java
index 3c14627..81e345a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosIdentityDataFile.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosIdentityDataFile.java
@@ -23,19 +23,19 @@
  * for the Kerberos action (metadata) data files.
  */
 public interface KerberosIdentityDataFile extends KerberosDataFile {
-  public static final String DATA_FILE_NAME = "identity.dat";
+  String DATA_FILE_NAME = "identity.dat";
 
-  public static final String HOSTNAME = "hostname";
-  public static final String SERVICE = "service";
-  public static final String COMPONENT = "component";
-  public static final String PRINCIPAL = "principal";
-  public static final String PRINCIPAL_TYPE = "principal_type";
-  public static final String KEYTAB_FILE_PATH = "keytab_file_path";
-  public static final String KEYTAB_FILE_OWNER_NAME = "keytab_file_owner_name";
-  public static final String KEYTAB_FILE_OWNER_ACCESS = "keytab_file_owner_access";
-  public static final String KEYTAB_FILE_GROUP_NAME = "keytab_file_group_name";
-  public static final String KEYTAB_FILE_GROUP_ACCESS = "keytab_file_group_access";
-  public static final String KEYTAB_FILE_IS_CACHABLE = "keytab_file_is_cachable";
+  String HOSTNAME = "hostname";
+  String SERVICE = "service";
+  String COMPONENT = "component";
+  String PRINCIPAL = "principal";
+  String PRINCIPAL_TYPE = "principal_type";
+  String KEYTAB_FILE_PATH = "keytab_file_path";
+  String KEYTAB_FILE_OWNER_NAME = "keytab_file_owner_name";
+  String KEYTAB_FILE_OWNER_ACCESS = "keytab_file_owner_access";
+  String KEYTAB_FILE_GROUP_NAME = "keytab_file_group_name";
+  String KEYTAB_FILE_GROUP_ACCESS = "keytab_file_group_access";
+  String KEYTAB_FILE_IS_CACHABLE = "keytab_file_is_cachable";
 
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
new file mode 100644
index 0000000..8557b93
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
+import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Inject;
+
+/**
+ * Abstract class that reads values from command params in a consistent way.
+ */
+public abstract class AbstractUpgradeServerAction extends AbstractServerAction {
+
+  public static final String CLUSTER_NAME_KEY = UpgradeContext.COMMAND_PARAM_CLUSTER_NAME;
+  public static final String UPGRADE_DIRECTION_KEY = UpgradeContext.COMMAND_PARAM_DIRECTION;
+  public static final String VERSION_KEY = UpgradeContext.COMMAND_PARAM_VERSION;
+  protected static final String REQUEST_ID = UpgradeContext.COMMAND_PARAM_REQUEST_ID;
+
+  /**
+   * The original "current" stack of the cluster before the upgrade started.
+   * This is the same regardless of whether the current direction is
+   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
+   */
+  protected static final String ORIGINAL_STACK_KEY = UpgradeContext.COMMAND_PARAM_ORIGINAL_STACK;
+
+  /**
+   * The target upgrade stack before the upgrade started. This is the same
+   * regardless of whether the current direction is {@link Direction#UPGRADE} or
+   * {@link Direction#DOWNGRADE}.
+   */
+  protected static final String TARGET_STACK_KEY = UpgradeContext.COMMAND_PARAM_TARGET_STACK;
+
+  protected static final String SUPPORTED_SERVICES_KEY = UpgradeResourceProvider.COMMAND_PARAM_SUPPORTED_SERVICES;
+
+  @Inject
+  protected Clusters m_clusters;
+
+  /**
+   * @return the set of supported services
+   */
+  protected Set<String> getSupportedServices() {
+    String services = getCommandParameterValue(SUPPORTED_SERVICES_KEY);
+    if (StringUtils.isBlank(services)) {
+      return Collections.emptySet();
+    } else {
+      return Sets.newHashSet(StringUtils.split(services, ','));
+    }
+  }
+
+}
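
The supported-services value above arrives as a comma-separated command param; a self-contained sketch (class and sample values are illustrative) of the same parsing logic, whose empty-set result is what lets FinalizeUpgradeAction below treat "no list" as "no filtering":

    import java.util.Collections;
    import java.util.Set;

    import org.apache.commons.lang.StringUtils;

    import com.google.common.collect.Sets;

    public class SupportedServicesSketch {
      /** Mirrors AbstractUpgradeServerAction#getSupportedServices for a raw value. */
      static Set<String> parse(String services) {
        if (StringUtils.isBlank(services)) {
          return Collections.emptySet();   // blank or missing means "no filtering"
        }
        return Sets.newHashSet(StringUtils.split(services, ','));
      }

      public static void main(String[] args) {
        System.out.println(parse("HDFS,YARN,ZOOKEEPER")); // [HDFS, YARN, ZOOKEEPER] (unordered)
        System.out.println(parse(""));                    // []
        System.out.println(parse(null));                  // []
      }
    }
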
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
index 7705c7d..52c0cf2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -53,7 +53,7 @@
     StackId targetStackId = new StackId(commandParams.get(TARGET_STACK_KEY));
     String clusterName = getExecutionCommand().getClusterName();
 
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = m_clusters.getCluster(clusterName);
 
     List<InfoTuple> errors = checkHostComponentVersions(cluster, version, targetStackId);
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 086055d..32d6151 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -49,9 +49,7 @@
 import org.apache.ambari.server.orm.entities.ServiceComponentHistoryEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.Service;
@@ -59,7 +57,6 @@
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.UpgradeState;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostSummary;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.text.StrBuilder;
@@ -69,35 +66,11 @@
 /**
  * Action that represents finalizing the Upgrade by completing any database changes.
  */
-public class FinalizeUpgradeAction extends AbstractServerAction {
+public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
 
-  public static final String CLUSTER_NAME_KEY = "cluster_name";
-  public static final String UPGRADE_DIRECTION_KEY = "upgrade_direction";
-  public static final String VERSION_KEY = "version";
-  public static final String REQUEST_ID = "request_id";
   public static final String PREVIOUS_UPGRADE_NOT_COMPLETED_MSG = "It is possible that a previous upgrade was not finalized. " +
       "For this reason, Ambari will not remove any configs. Please ensure that all database records are correct.";
 
-  /**
-   * The original "current" stack of the cluster before the upgrade started.
-   * This is the same regardless of whether the current direction is
-   * {@link Direction#UPGRADE} or {@link Direction#DOWNGRADE}.
-   */
-  public static final String ORIGINAL_STACK_KEY = "original_stack";
-
-  /**
-   * The target upgrade stack before the upgrade started. This is the same
-   * regardless of whether the current direction is {@link Direction#UPGRADE} or
-   * {@link Direction#DOWNGRADE}.
-   */
-  public static final String TARGET_STACK_KEY = "target_stack";
-
-  /**
-   * The Cluster that this ServerAction implementation is executing on
-   */
-  @Inject
-  protected Clusters clusters;
-
   @Inject
   private ClusterVersionDAO clusterVersionDAO;
 
@@ -169,7 +142,7 @@
     try {
       outSB.append(MessageFormat.format("Begin finalizing the upgrade of cluster {0} to version {1}\n", clusterName, version));
 
-      Cluster cluster = clusters.getCluster(clusterName);
+      Cluster cluster = m_clusters.getCluster(clusterName);
       StackId clusterDesiredStackId = cluster.getDesiredStackVersion();
       StackId clusterCurrentStackId = cluster.getCurrentStackVersion();
 
@@ -359,7 +332,7 @@
     StringBuilder err = new StringBuilder();
 
     try {
-      Cluster cluster = clusters.getCluster(clusterName);
+      Cluster cluster = m_clusters.getCluster(clusterName);
       StackId currentClusterStackId = cluster.getCurrentStackVersion();
 
       // Safety check that the cluster's stack (from clusterstate's current_stack_id) is equivalent to the
@@ -484,7 +457,15 @@
 
     ArrayList<InfoTuple> errors = new ArrayList<>();
 
+    Set<String> supportedServices = getSupportedServices();
+
     for (Service service : cluster.getServices().values()) {
+
+      // !!! if there are supported services for upgrade, and the cluster service is NOT in the list, skip
+      if (!supportedServices.isEmpty() && !supportedServices.contains(service.getName())) {
+        continue;
+      }
+
       for (ServiceComponent serviceComponent : service.getServiceComponents().values()) {
         for (ServiceComponentHost serviceComponentHost : serviceComponent.getServiceComponentHosts().values()) {
           ComponentInfo componentInfo = ambariMetaInfo.getComponent(targetStackId.getStackName(),
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
index fbb88d8..92f6e50 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
@@ -66,6 +66,11 @@
     Cluster cluster = clusters.getCluster(clusterName);
     Config config = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
 
+    if (null == config) {
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+          String.format("The cluster does not have %s defined.", SOURCE_CONFIG_TYPE), "");
+    }
+
     Map<String, String> properties = config.getProperties();
 
     Set<String> parentQueueNames = new HashSet<>();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
index 134288f..7bcb9d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
@@ -119,13 +119,18 @@
       LOG.warn(String.format("Did not receive role parameter %s, will save configs using anonymous username %s", ServerAction.ACTION_USER_NAME, userName));
     }
 
-    return updateDesiredStack(clusterName, originalStackId, targetStackId, version, direction, upgradePack, userName);
+    // invalidate any cached effective ID
+    Cluster cluster = clusters.getCluster(clusterName);
+    cluster.invalidateUpgradeEffectiveVersion();
+
+    return updateDesiredStack(cluster, originalStackId, targetStackId, version, direction,
+        upgradePack, userName);
   }
 
   /**
    * Set the cluster's Desired Stack Id during an upgrade.
    *
-   * @param clusterName the name of the cluster the action is meant for
+   * @param cluster the cluster
    * @param originalStackId the stack Id of the cluster before the upgrade.
    * @param targetStackId the stack Id that was desired for this upgrade.
    * @param direction direction, either upgrade or downgrade
@@ -134,14 +139,15 @@
    * @return the command report to return
    */
   private CommandReport updateDesiredStack(
-      String clusterName, StackId originalStackId, StackId targetStackId,
+      Cluster cluster, StackId originalStackId, StackId targetStackId,
       String version, Direction direction, UpgradePack upgradePack, String userName)
       throws AmbariException, InterruptedException {
+
+    String clusterName = cluster.getClusterName();
     StringBuilder out = new StringBuilder();
     StringBuilder err = new StringBuilder();
 
     try {
-      Cluster cluster = clusters.getCluster(clusterName);
       StackId currentClusterStackId = cluster.getCurrentStackVersion();
       out.append(String.format("Params: %s %s %s %s %s %s\n",
           clusterName, originalStackId.getStackId(), targetStackId.getStackId(), version, direction.getText(false), upgradePack.getName()));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationInfo.java
index f8a19ac..51deeb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ConfigurationInfo.java
@@ -153,7 +153,7 @@
    * For example, Hadoop configuration types like 'core-site' and 'hdfs-site'
    * can support the ability to define certain configs as 'final'.
    */
-  public static enum Supports {
+  public enum Supports {
 
     FINAL("supports_final"),
     ADDING_FORBIDDEN("supports_adding_forbidden"),
@@ -164,11 +164,11 @@
     private String defaultValue;
     private String xmlAttributeName;
 
-    private Supports(String xmlAttributeName) {
+    Supports(String xmlAttributeName) {
       this(xmlAttributeName, Boolean.FALSE.toString());
     }
 
-    private Supports(String xmlAttributeName, String defaultValue) {
+    Supports(String xmlAttributeName, String defaultValue) {
       this.defaultValue = defaultValue;
       this.xmlAttributeName = xmlAttributeName;
     }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
index ce92ecf..3ad0487 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
@@ -394,6 +394,8 @@
       extensionInfo.setParentExtensionVersion(emx.getExtends());
       extensionInfo.setStacks(emx.getStacks());
       extensionInfo.setExtensions(emx.getExtensions());
+      extensionInfo.setActive(emx.getVersion().isActive());
+      extensionInfo.setAutoLink(emx.isAutoLink());
     }
 
     try {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java
index c879c02..0f36a9a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/QuickLinksConfigurationModule.java
@@ -125,7 +125,7 @@
 
   @Override
   public void addErrors(Collection<String> errors) {
-    errors.addAll(errors);
+    this.errors.addAll(errors);
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java
index c5ce976..5e095a1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDefinitionModule.java
@@ -41,33 +41,33 @@
    *
    * @throws AmbariException if resolution fails
    */
-  public void resolve(T parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException;
+  void resolve(T parent, Map<String, StackModule> allStacks, Map<String, ServiceModule> commonServices, Map<String, ExtensionModule> extensions) throws AmbariException;
 
   /**
    * Obtain the associated module information.
    *
    * @return associated module information
    */
-  public I getModuleInfo();
+  I getModuleInfo();
 
   /**
    * Determine whether the module has been marked for deletion.
    *
    * @return true if the module is marked for deletion; otherwise false
    */
-  public boolean isDeleted();
+  boolean isDeleted();
 
   /**
    * Obtain the id of the module.
    *
    * @return module id
    */
-  public String getId();
+  String getId();
 
   /**
   * Lifecycle event which is called when the associated stack has been fully resolved.
    */
-  public void finalizeModule();
+  void finalizeModule();
 
   /**
    * Module state.
@@ -77,17 +77,17 @@
    *
    * @return the module state
    */
-  public ModuleState getModuleState();
+  ModuleState getModuleState();
   
   /**
    * 
    * @return valid module flag
    */
-  public boolean isValid();
+  boolean isValid();
 
   /**
    * 
    * @param valid set validity flag
    */
-  public void setValid(boolean valid);
+  void setValid(boolean valid);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index d0950a7..5b44112 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -20,6 +20,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -437,6 +438,9 @@
     if (configUpgradePack != null) {
       this.configUpgradePack = configUpgradePack;
     } else {
+      ConfigUpgradePack emptyConfigUpgradePack = new ConfigUpgradePack();
+      emptyConfigUpgradePack.services = new ArrayList<>();
+      this.configUpgradePack = emptyConfigUpgradePack;
       LOG.info("Stack '{}' doesn't contain config upgrade pack file", getPath());
     }
   }
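
The change above is a small null-object: stacks without a config upgrade pack now get an empty pack instead of null, so downstream code can iterate unconditionally. A hypothetical consumer (the accessor name and element handling are assumptions, not taken from this patch):

    // Assumed accessor name; safe iteration is the point being illustrated.
    ConfigUpgradePack pack = stackDirectory.getConfigUpgradePack();
    for (Object service : pack.services) {
      // never entered for stacks without a pack, but no NullPointerException either
    }
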
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index 20e28fc..d19aeba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@ -19,11 +19,13 @@
 package org.apache.ambari.server.stack;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.annotation.Nullable;
 import javax.xml.XMLConstants;
@@ -35,6 +37,7 @@
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -56,7 +59,6 @@
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 
-
 /**
  * Manages all stack related behavior including parsing of stacks and providing access to
  * stack information.
@@ -109,6 +111,8 @@
    */
   private Map<String, ExtensionInfo> extensionMap = new HashMap<>();
 
+  private AmbariManagementHelper helper;
+
   /**
    * Constructor. Initialize stack manager.
    *
@@ -130,6 +134,8 @@
    *          extension DAO automatically injected
    * @param linkDao
    *          extension link DAO automatically injected
+   * @param helper
+   *          Ambari management helper automatically injected
    *
    * @throws AmbariException
    *           if an exception occurs while processing the stacks
@@ -140,7 +146,7 @@
       @Assisted("extensionRoot") @Nullable File extensionRoot,
       @Assisted OsFamily osFamily, @Assisted boolean validate,
       MetainfoDAO metaInfoDAO, ActionMetadata actionMetadata, StackDAO stackDao,
-      ExtensionDAO extensionDao, ExtensionLinkDAO linkDao)
+      ExtensionDAO extensionDao, ExtensionLinkDAO linkDao, AmbariManagementHelper helper)
       throws AmbariException {
 
     LOG.info("Initializing the stack manager...");
@@ -154,6 +160,7 @@
     stackMap = new HashMap<>();
     stackContext = new StackContext(metaInfoDAO, actionMetadata, osFamily);
     extensionMap = new HashMap<>();
+    this.helper = helper;
 
     parseDirectories(stackRoot, commonServicesRoot, extensionRoot);
 
@@ -188,6 +195,7 @@
     LOG.info("About to parse extension directories");
     extensionModules = parseExtensionDirectory(extensionRoot);
   }
+
   private void populateDB(StackDAO stackDao, ExtensionDAO extensionDao) throws AmbariException {
     // for every stack read in, ensure that we have a database entry for it;
     // don't put try/catch logic around this since a failure here will
@@ -226,6 +234,51 @@
         extensionDao.create(extensionEntity);
       }
     }
+
+    createLinks();
+  }
+
+  /**
+   * Attempts to automatically create links between extension versions and stack versions.
+   * This is limited to 'active' extensions that have the 'autolink' attribute set (in the metainfo.xml).
+   * Stack versions are selected based on the minimum stack versions that the extension supports.
+   * The extension and stack versions are processed in order of most recent to oldest.
+   * In this manner, the newest extension version will be autolinked before older extension versions.
+   * If a different version of the same extension is already linked to a stack version then that stack version
+   * will be skipped.
+   */
+  private void createLinks() {
+    LOG.info("Creating links");
+    Collection<ExtensionInfo> extensions = getExtensions();
+    Set<String> names = new HashSet<String>();
+    for (ExtensionInfo extension : extensions) {
+      names.add(extension.getName());
+    }
+    for (String name : names) {
+      createLinksForExtension(name);
+    }
+  }
+
+  /**
+   * Attempts to automatically create links between versions of a particular extension and stack versions they support.
+   * This is limited to 'active' extensions that have the 'autolink' attribute set (in the metainfo.xml).
+   * Stack versions are selected based on the minimum stack versions that the extension supports.
+   * The extension and stack versions are processed in order of most recent to oldest.
+   * In this manner, the newest extension version will be autolinked before older extension versions.
+   * If a different version of the same extension is already linked to a stack version then that stack version
+   * will be skipped.
+   */
+  private void createLinksForExtension(String name) {
+    Collection<ExtensionInfo> collection = getExtensions(name);
+    List<ExtensionInfo> extensions = new ArrayList<ExtensionInfo>(collection.size());
+    extensions.addAll(collection);
+    try {
+      helper.createExtensionLinks(this, extensions);
+    }
+    catch (AmbariException e) {
+      String msg = String.format("Failed to create link for extension: %s with exception: %s", name, e.getMessage());
+      LOG.error(msg);
+    }
   }
 
   /**
@@ -258,6 +311,24 @@
   }
 
   /**
+   * Obtain a map of all stacks, keyed by stack name.
+   *
+   * @return A map of all stacks with the name as the key.
+   */
+  public Map<String, List<StackInfo>> getStacksByName() {
+    Map<String, List<StackInfo>> stacks = new HashMap<String, List<StackInfo>>();
+    for (StackInfo stack: stackMap.values()) {
+      List<StackInfo> list = stacks.get(stack.getName());
+      if (list == null) {
+        list = new ArrayList<StackInfo>();
+        stacks.put(stack.getName(), list);
+      }
+      list.add(stack);
+    }
+    return stacks;
+  }
+
+  /**
    * Obtain all stacks.
    *
    * @return collection of all stacks
@@ -459,8 +530,6 @@
     }
   }
 
-
-
   /**
    * Validate that the specified extension root is a valid directory.
    *
@@ -567,9 +636,11 @@
   }
 
   public void linkStackToExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    stack.addExtension(extension);
   }
 
   public void unlinkStackAndExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    stack.removeExtension(extension);
   }
 
   /**
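
An illustrative caller for the new getStacksByName() grouping (stackManager is assumed to be an initialized StackManager; the stack names in the comment are hypothetical):

    Map<String, List<StackInfo>> stacksByName = stackManager.getStacksByName();
    for (Map.Entry<String, List<StackInfo>> entry : stacksByName.entrySet()) {
      // e.g. "HDP" -> every parsed HDP version, newest and oldest alike
      System.out.println(entry.getKey() + ": " + entry.getValue().size() + " version(s)");
    }
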
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index dfe7072..672b72f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -408,9 +408,7 @@
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
-      stackInfo.getExtensions().add(extension.getModuleInfo());
-      Collection<ServiceModule> services = extension.getServiceModules().values();
-      addServices(services);
+      stackInfo.addExtension(extension.getModuleInfo());
     }
   }
 
@@ -1046,7 +1044,7 @@
    * Finds an upgrade pack that:
    * <ul>
    *   <li>Is found in the $SERVICENAME/upgrades/$STACKNAME folder</li>
-   *   <li>Matches the same {@link UpgradeType#getType()}as the {@code base} upgrade pack</li>
+   *   <li>Matches the same {@link UpgradeType} as the {@code base} upgrade pack</li>
   *   <li>Has the {@link UpgradePack#getTarget()} value equal to "*"</li>
   *   <li>Has the {@link UpgradePack#getTargetStack()} value equal to "*"</li>
    * </ul>
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java
index 72d3bd3..d0a5f34 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ThemeModule.java
@@ -125,7 +125,7 @@
 
   @Override
   public void addErrors(Collection<String> errors) {
-    errors.addAll(errors);
+    this.errors.addAll(errors);
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/Validable.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/Validable.java
index 0a557b4..0f9b367 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/Validable.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/Validable.java
@@ -30,19 +30,19 @@
    * 
    * @return valid xml flag
    */
-  public boolean isValid();
+  boolean isValid();
 
   /**
    * 
    * @param valid set validity flag
    */
-  public void setValid(boolean valid);
+  void setValid(boolean valid);
   
-  public void addError(String error);
+  void addError(String error);
   
-  public void addErrors(Collection<String> errors);
+  void addErrors(Collection<String> errors);
   
-  public Collection<String> getErrors();
+  Collection<String> getErrors();
   
   
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
index b54c7c7..b6b756b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
@@ -283,7 +283,7 @@
     Stage newStage = stageFactory.createNew(origStage.getRequestId(),
         origStage.getLogDir(), origStage.getClusterName(),
         origStage.getClusterId(),
-        origStage.getRequestContext(), origStage.getClusterHostInfo(),
+        origStage.getRequestContext(),
         origStage.getCommandParamsStage(), origStage.getHostParamsStage());
     newStage.setSuccessFactors(origStage.getSuccessFactors());
     newStage.setSkippable(origStage.isSkippable());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/AlertState.java b/ambari-server/src/main/java/org/apache/ambari/server/state/AlertState.java
index 8e04eb1..3834472 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/AlertState.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/AlertState.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.state;
 
+import java.util.EnumSet;
+
 /**
  * Represents the state of an alert.
  */
@@ -48,6 +50,8 @@
    */
   SKIPPED(4);
 
+  public static final EnumSet<AlertState> RECALCULATE_AGGREGATE_ALERT_STATES = EnumSet.of(CRITICAL, WARNING);
+
   private final int intValue;
 
   public int getIntValue() {
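
The EnumSet added above turns "does this alert state force an aggregate recalculation?" into a single membership test; a minimal sketch (the caller code is illustrative):

    AlertState state = AlertState.WARNING;

    // Replaces chained comparisons such as: state == CRITICAL || state == WARNING
    if (AlertState.RECALCULATE_AGGREGATE_ALERT_STATES.contains(state)) {
      // trigger recalculation of dependent AGGREGATE alerts
    }
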
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 9594803..1ef204d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -36,6 +36,7 @@
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.scheduler.RequestExecution;
 
 import com.google.common.collect.ListMultimap;
@@ -200,7 +201,7 @@
    * @param stackId the stack id
    * @param cascade {@code true} to cascade the desired version
    */
-  public void setDesiredStackVersion(StackId stackId, boolean cascade) throws AmbariException;
+  void setDesiredStackVersion(StackId stackId, boolean cascade) throws AmbariException;
 
 
   /**
@@ -229,25 +230,40 @@
   /**
    * Creates or updates host versions for all of the hosts within a cluster
    * based on state of cluster stack version. This is used to transition all
-   * hosts into the specified state.
+   * hosts into the correct state (which may not be
+   * {@link RepositoryVersionState#INSTALLING}).
    * <p/>
    * The difference between this method compared to
    * {@link Cluster#mapHostVersions} is that it affects all hosts (not only
    * missing hosts).
    * <p/>
   * Hosts that are in maintenance mode will be transitioned directly into
-   * {@link RepositoryVersionState#OUT_OF_SYNC} instead.
+   * {@link RepositoryVersionState#OUT_OF_SYNC} instead. Hosts which do not need
+   * the version distributed to them will move into the
+   * {@link RepositoryVersionState#NOT_REQUIRED} state.
    *
    * @param sourceClusterVersion
    *          cluster version to be queried for a stack name/version info and
    *          desired RepositoryVersionState. The only valid state of a cluster
    *          version is {@link RepositoryVersionState#INSTALLING}
-   * @param state
-   *          the state to transition the cluster's hosts to.
+   * @param repoVersionEntity
+   *          the repository that the hosts are being transitioned for (not
+   *          {@code null}).
+   * @param versionDefinitionXml
+   *          the VDF, or {@code null} if none.
+   * @param forceInstalled
+   *          if {@code true}, then this will transition everything directly to
+   *          {@link RepositoryVersionState#INSTALLED} instead of
+   *          {@link RepositoryVersionState#INSTALLING}. Hosts which should
+   *          receive other states (like
+   *          {@link RepositoryVersionState#NOT_REQUIRED}) will continue to
+   *          receive those states.
+   * @return a list of hosts which need the repository installed.
    * @throws AmbariException
    */
-  void transitionHosts(ClusterVersionEntity sourceClusterVersion, RepositoryVersionState state)
-      throws AmbariException;
+  List<Host> transitionHostsToInstalling(ClusterVersionEntity sourceClusterVersion,
+      RepositoryVersionEntity repoVersionEntity, VersionDefinitionXml versionDefinitionXml,
+      boolean forceInstalled) throws AmbariException;
 
   /**
    * For a given host, will either either update an existing Host Version Entity for the given version, or create
@@ -280,8 +296,9 @@
    * Create a cluster version for the given stack and version, whose initial
   * state must be either {@link RepositoryVersionState#UPGRADING} (if no
    * other cluster version exists) or {@link RepositoryVersionState#INSTALLING}
-   * (if at exactly one CURRENT cluster version already exists) or {@link RepositoryVersionState#INIT}
-   * (if the cluster is being created using a specific repository version).
+   * (if exactly one CURRENT cluster version already exists) or
+   * {@link RepositoryVersionState#INIT} (if the cluster is being created using
+   * a specific repository version).
    *
    * @param stackId
    *          Stack ID
@@ -291,9 +308,10 @@
    *          User performing the operation
    * @param state
    *          Initial state
+   * @return the newly created and persisted {@link ClusterVersionEntity}.
    * @throws AmbariException
    */
-  void createClusterVersion(StackId stackId, String version,
+  ClusterVersionEntity createClusterVersion(StackId stackId, String version,
       String userName, RepositoryVersionState state) throws AmbariException;
 
   /**
@@ -437,7 +455,7 @@
    * @param serviceName service name
    * @return
    */
-  public List<ServiceConfigVersionResponse> getActiveServiceConfigVersionResponse(String serviceName);
+  List<ServiceConfigVersionResponse> getActiveServiceConfigVersionResponse(String serviceName);
 
   /**
    * Get service config version history
@@ -675,10 +693,10 @@
    * Gets an {@link UpgradeEntity} if there is an upgrade in progress or an
    * upgrade that has been suspended. This will return the associated
    * {@link UpgradeEntity} if it exists.
-   * 
+   *
    * @return an upgrade which will either be in progress or suspended, or
    *         {@code null} if none.
-   * 
+   *
    */
   UpgradeEntity getUpgradeInProgress();
 
@@ -695,8 +713,7 @@
    * Gets whether there is an upgrade which has been suspended and not yet
    * finalized.
    *
-   * @return {@code true} if the last upgrade is in the
-   *         {@link UpgradeState#SUSPENDED}.
+   * @return {@code true} if the last upgrade is suspended
    */
   boolean isUpgradeSuspended();
 
@@ -754,4 +771,11 @@
    */
   void addSuspendedUpgradeParameters(Map<String, String> commandParams,
       Map<String, String> roleParams);
+
+  /**
+   * Invalidates any cached effective cluster versions for upgrades.
+   *
+   * @see #getEffectiveClusterVersion()
+   */
+  void invalidateUpgradeEffectiveVersion();
 }
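
The precedence among those states can be read straight off the contract: NOT_REQUIRED always wins, maintenance mode then forces OUT_OF_SYNC, and forceInstalled only upgrades the default INSTALLING to INSTALLED. A standalone sketch of that rule (a simplified model of the documented behavior, not the ClusterImpl implementation):

    public class HostTransitionSketch {

        enum RepositoryVersionState { INSTALLING, INSTALLED, OUT_OF_SYNC, NOT_REQUIRED }

        /**
         * Chooses the target host version state per the contract above: hosts
         * that do not need the repository are NOT_REQUIRED, hosts in
         * maintenance mode are OUT_OF_SYNC, and the rest are INSTALLING
         * (or INSTALLED when forceInstalled is set).
         */
        static RepositoryVersionState targetState(boolean needsRepository,
                                                  boolean inMaintenanceMode,
                                                  boolean forceInstalled) {
            if (!needsRepository) {
                return RepositoryVersionState.NOT_REQUIRED;
            }
            if (inMaintenanceMode) {
                return RepositoryVersionState.OUT_OF_SYNC;
            }
            return forceInstalled
                ? RepositoryVersionState.INSTALLED
                : RepositoryVersionState.INSTALLING;
        }

        public static void main(String[] args) {
            System.out.println(targetState(true, false, false));  // INSTALLING
            System.out.println(targetState(true, true, true));    // OUT_OF_SYNC
            System.out.println(targetState(false, true, false));  // NOT_REQUIRED
        }
    }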
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
index cee55fa..ed565d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/CommandScriptDefinition.java
@@ -57,7 +57,7 @@
     return timeout;
   }
 
-  public static enum Type {
+  public enum Type {
     PYTHON
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
index 67570f4..4346cf5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
@@ -33,53 +33,53 @@
   /**
    * @return Config Type
    */
-  public String getType();
+  String getType();
 
   /**
    * @return Version Tag this config instance is mapped to
    */
-  public String getTag();
+  String getTag();
 
   /**
    * Gets the stack that this configuration belongs to.
    *
   * @return the stack (not {@code null}).
    */
-  public StackId getStackId();
+  StackId getStackId();
 
   /**
    *
    * @return version of config by type
    */
-  public Long getVersion();
+  Long getVersion();
 
   /**
    * @return Properties that define this config instance
    */
-  public Map<String, String> getProperties();
+  Map<String, String> getProperties();
 
   /**
    * @return Map of attributes in this config-type to value per property
    */
-  public Map<String, Map<String, String>> getPropertiesAttributes();
+  Map<String, Map<String, String>> getPropertiesAttributes();
 
   /**
    * Replace properties with new provided set
    * @param properties Property Map to replace existing one
    */
-  public void setProperties(Map<String, String> properties);
+  void setProperties(Map<String, String> properties);
 
   /**
    * Replace property attributes with new provided set
    * @param propertiesAttributes Property Attributes Map to replace existing one
    */
-  public void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes);
+  void setPropertiesAttributes(Map<String, Map<String, String>> propertiesAttributes);
 
   /**
    * Update provided properties' values.
    * @param properties Property Map with updated values
    */
-  public void updateProperties(Map<String, String> properties);
+  void updateProperties(Map<String, String> properties);
 
   /**
    * Ger service config versions containing this config
@@ -91,10 +91,10 @@
    * Delete certain properties
    * @param properties Property keys to be deleted
    */
-  public void deleteProperties(List<String> properties);
+  void deleteProperties(List<String> properties);
 
   /**
    * Persist the configuration.
    */
-  public void save();
+  void save();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 543dadd..0e40254 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -52,7 +52,7 @@
   private final static Logger LOG = LoggerFactory.getLogger(ConfigImpl.class);
 
   /**
-   * A label for {@link #hostLock} to use with the {@link LockFactory}.
+   * A label for {@link #propertyLock} to use with the {@link LockFactory}.
    */
   private static final String PROPERTY_LOCK_LABEL = "configurationPropertyLock";
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
index 280a59b..ac16f87 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/CustomCommandDefinition.java
@@ -30,6 +30,7 @@
 public class CustomCommandDefinition {
 
   private String name;
+  private String opsDisplayName;
   private CommandScriptDefinition commandScript;
   private boolean background = false;
 
@@ -41,6 +42,10 @@
     return background;
   }
 
+  public String getOpsDisplayName() {
+    return opsDisplayName;
+  }
+
   public CommandScriptDefinition getCommandScript() {
     return commandScript;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
index 656a8ce..58643cb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
@@ -29,6 +29,7 @@
 import org.apache.ambari.server.controller.ExtensionVersionResponse;
 import org.apache.ambari.server.stack.Validable;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -44,6 +45,8 @@
   private List<ExtensionMetainfoXml.Stack> stacks;
   private List<ExtensionMetainfoXml.Extension> extensions;
   private boolean valid = true;
+  private boolean autoLink = false;
+  private boolean active = false;
 
   /**
    *
@@ -184,9 +187,10 @@
 
   @Override
   public int compareTo(ExtensionInfo o) {
-    String myId = name + "-" + version;
-    String oId = o.name + "-" + o.version;
-    return myId.compareTo(oId);
+    if (name.equals(o.name)) {
+      return VersionUtils.compareVersions(version, o.version);
+    }
+    return name.compareTo(o.name);
   }
 
   public List<ExtensionMetainfoXml.Stack> getStacks() {
@@ -204,4 +208,20 @@
   public void setExtensions(List<ExtensionMetainfoXml.Extension> extensions) {
     this.extensions = extensions;
   }
+
+  public boolean isAutoLink() {
+    return autoLink;
+  }
+
+  public void setAutoLink(boolean autoLink) {
+    this.autoLink = autoLink;
+  }
+
+  public boolean isActive() {
+    return active;
+  }
+
+  public void setActive(boolean active) {
+    this.active = active;
+  }
 }
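
The same change is applied to StackInfo.compareTo later in this patch; the point of both is that multi-digit version segments sort numerically instead of lexically. A small illustration, using a simplified dot-version comparator in place of Ambari's VersionUtils:

    import java.util.Arrays;

    public class VersionOrderSketch {

        // Simplified numeric comparison of dotted versions; VersionUtils
        // handles more formats, but the ordering idea is the same.
        static int compareVersions(String a, String b) {
            String[] as = a.split("\\.");
            String[] bs = b.split("\\.");
            for (int i = 0; i < Math.max(as.length, bs.length); i++) {
                int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
                int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
                if (ai != bi) {
                    return Integer.compare(ai, bi);
                }
            }
            return 0;
        }

        public static void main(String[] args) {
            String[] lexical = {"2.1", "2.10", "2.9"};
            Arrays.sort(lexical); // old "name-version" string compare
            System.out.println(Arrays.toString(lexical)); // [2.1, 2.10, 2.9] -- wrong for versions

            String[] numeric = {"2.1", "2.10", "2.9"};
            Arrays.sort(numeric, VersionOrderSketch::compareVersions);
            System.out.println(Arrays.toString(numeric)); // [2.1, 2.9, 2.10]
        }
    }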
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java
index fb8fe96..0893548 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java
@@ -46,7 +46,7 @@
     return healthReport;
   }
 
-  public static enum HealthStatus {
+  public enum HealthStatus {
     UNKNOWN,      // lost heartbeat
     HEALTHY,      // all masters and slaves are live
     UNHEALTHY,    // at least one master is dead
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index e93ab9a..b5b6821 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -97,4 +97,19 @@
       String hostName) throws AmbariException;
 
   void delete() throws AmbariException;
+
+  /**
+   * This method computes the state of the repository that's associated with the desired
+   * version.  It is used, for example, when a host component reports its version and the
+   * state can be in flux.
+   *
+   * @param reportedVersion the version reported by a host component
+   * @throws AmbariException
+   */
+  void updateRepositoryState(String reportedVersion) throws AmbariException;
+
+  /**
+   * @return the repository state for the desired version
+   */
+  RepositoryVersionState getRepositoryState();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index f6ddc6d..4cfb250 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.state;
 
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
@@ -32,21 +33,31 @@
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
 import org.apache.ambari.server.events.ServiceComponentRecoveryChangedEvent;
+import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Maps;
+import com.google.inject.Inject;
 import com.google.inject.ProvisionException;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
@@ -86,6 +97,12 @@
    */
   private final StackDAO stackDAO;
 
+  @Inject
+  private RepositoryVersionDAO repoVersionDAO;
+
+  @Inject
+  private HostComponentStateDAO hostComponentDAO;
+
   @AssistedInject
   public ServiceComponentImpl(@Assisted Service service, @Assisted String componentName,
       AmbariMetaInfo ambariMetaInfo,
@@ -121,6 +138,7 @@
     desiredStateEntityId = desiredStateEntity.getId();
   }
 
+  @Override
   public void updateComponentInfo() throws AmbariException {
     StackId stackId = service.getDesiredStackVersion();
     try {
@@ -427,7 +445,8 @@
     ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(),
         cluster.getClusterName(), service.getName(), getName(),
         getDesiredStackVersion().getStackId(), getDesiredState().toString(),
-        getServiceComponentStateCount(), isRecoveryEnabled(), displayName);
+        getServiceComponentStateCount(), isRecoveryEnabled(), displayName,
+        getDesiredVersion(), getRepositoryState());
     return r;
   }
 
@@ -575,6 +594,151 @@
   }
 
 
+  /**
+   * Follows this version logic:
+   * <table border="1">
+   *   <tr>
+   *     <th>DB hostcomponent1</th>
+   *     <th>DB hostcomponentN</th>
+   *     <th>DB desired</th>
+   *     <th>New desired</th>
+   *     <th>Repo State</th>
+   *   </tr>
+   *   <tr>
+   *     <td>v1</td>
+   *     <td>v1</td>
+   *     <td>UNKNOWN</td>
+   *     <td>v1</td>
+   *     <td>CURRENT</td>
+   *   </tr>
+   *   <tr>
+   *     <td>v1</td>
+   *     <td>v2</td>
+   *     <td>UNKNOWN</td>
+   *     <td>UNKNOWN</td>
+   *     <td>OUT_OF_SYNC</td>
+   *   </tr>
+   *   <tr>
+   *     <td>v1</td>
+   *     <td>v2</td>
+   *     <td>v2</td>
+   *     <td>v2 (no change)</td>
+   *     <td>OUT_OF_SYNC</td>
+   *   </tr>
+   *   <tr>
+   *     <td>v2</td>
+   *     <td>v2</td>
+   *     <td>v1</td>
+   *     <td>v1 (no change)</td>
+   *     <td>OUT_OF_SYNC</td>
+   *   </tr>
+   *   <tr>
+   *     <td>v2</td>
+   *     <td>v2</td>
+   *     <td>v2</td>
+   *     <td>v2 (no change)</td>
+   *     <td>CURRENT</td>
+   *   </tr>
+   * </table>
+   */
+  @Override
+  @Transactional
+  public void updateRepositoryState(String reportedVersion) throws AmbariException {
+
+    ServiceComponentDesiredStateEntity component = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
+    List<ServiceComponentVersionEntity> componentVersions = serviceComponentDesiredStateDAO.findVersions(
+        getClusterId(), getServiceName(), getName());
+
+    // per component, this list should be small, so iterating here isn't a big deal
+    Map<String, ServiceComponentVersionEntity> map = new HashMap<>(Maps.uniqueIndex(componentVersions,
+        new Function<ServiceComponentVersionEntity, String>() {
+          @Override
+          public String apply(ServiceComponentVersionEntity input) {
+            return input.getRepositoryVersion().getVersion();
+          }
+      }));
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Existing versions for {}/{}/{}: {}",
+          getClusterName(), getServiceName(), getName(), map.keySet());
+    }
+
+    ServiceComponentVersionEntity componentVersion = map.get(reportedVersion);
+
+    if (null == componentVersion) {
+      RepositoryVersionEntity repoVersion = repoVersionDAO.findByStackAndVersion(
+          getDesiredStackVersion(), reportedVersion);
+
+      if (null != repoVersion) {
+        componentVersion = new ServiceComponentVersionEntity();
+        componentVersion.setRepositoryVersion(repoVersion);
+        componentVersion.setState(RepositoryVersionState.INSTALLED);
+        componentVersion.setUserName("auto-reported");
+
+        // since we've never seen this version before, mark the component as CURRENT
+        component.setRepositoryState(RepositoryVersionState.CURRENT);
+        component.addVersion(componentVersion);
+
+        component = serviceComponentDesiredStateDAO.merge(component);
+
+        map.put(reportedVersion, componentVersion);
+
+      } else {
+        LOG.warn("There is no repository available for stack {}, version {}",
+            getDesiredStackVersion(), reportedVersion);
+      }
+    }
+
+    if (MapUtils.isNotEmpty(map)) {
+      String desiredVersion = component.getDesiredVersion();
+
+      List<HostComponentStateEntity> hostComponents = hostComponentDAO.findByServiceAndComponentAndNotVersion(
+          component.getServiceName(), component.getComponentName(), reportedVersion);
+
+      LOG.debug("{}/{} reportedVersion={}, desiredVersion={}, non-matching desired count={}, repo_state={}",
+          component.getServiceName(), component.getComponentName(), reportedVersion,
+          desiredVersion, hostComponents.size(), component.getRepositoryState());
+
+      // !!! if the desired version is UNKNOWN, it has never been set; try to determine it
+      if (StackVersionListener.UNKNOWN_VERSION.equals(desiredVersion)) {
+        if (CollectionUtils.isEmpty(hostComponents)) {
+          // all host components are the same version as reported
+          component.setDesiredVersion(reportedVersion);
+          component.setRepositoryState(RepositoryVersionState.CURRENT);
+        } else {
+          // desired is UNKNOWN and there's a mix of versions in the host components
+          component.setRepositoryState(RepositoryVersionState.OUT_OF_SYNC);
+        }
+      } else {
+        if (!reportedVersion.equals(desiredVersion)) {
+          component.setRepositoryState(RepositoryVersionState.OUT_OF_SYNC);
+        } else if (CollectionUtils.isEmpty(hostComponents)) {
+          component.setRepositoryState(RepositoryVersionState.CURRENT);
+        }
+      }
+
+      component = serviceComponentDesiredStateDAO.merge(component);
+    }
+  }
+
+  @Override
+  public RepositoryVersionState getRepositoryState() {
+    ServiceComponentDesiredStateEntity component = serviceComponentDesiredStateDAO.findById(
+        desiredStateEntityId);
+
+    if (null != component) {
+      return component.getRepositoryState();
+    } else {
+      LOG.warn("Cannot retrieve repository state on component that may have been deleted: service {}, component {}",
+          service != null ? service.getName() : null, componentName);
+
+      return null;
+    }
+  }
+
+
   private int getSCHCountByState(State state) {
     int count = 0;
     for (ServiceComponentHost sch : hostComponents.values()) {
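
The table in the javadoc above reduces to a small pure function of the reported version, the stored desired version, and whether any host component still reports a different version. A standalone sketch of that decision (simplified, with the persistence and entity lookups stripped out):

    public class ComponentRepoStateSketch {

        enum RepoState { CURRENT, OUT_OF_SYNC }

        // Stand-in for StackVersionListener.UNKNOWN_VERSION.
        static final String UNKNOWN = "UNKNOWN";

        /**
         * Mirrors the table above: an UNKNOWN desired version is adopted from
         * the report once every host component matches; a mismatch against the
         * desired version is OUT_OF_SYNC; a match is only promoted to CURRENT
         * when no host component differs, otherwise the previous state stands.
         */
        static RepoState decide(String reported, String desired,
                                boolean anyHostDiffers, RepoState previous) {
            if (UNKNOWN.equals(desired)) {
                return anyHostDiffers ? RepoState.OUT_OF_SYNC : RepoState.CURRENT;
            }
            if (!reported.equals(desired)) {
                return RepoState.OUT_OF_SYNC;
            }
            return anyHostDiffers ? previous : RepoState.CURRENT;
        }

        public static void main(String[] args) {
            System.out.println(decide("v1", UNKNOWN, false, null));              // CURRENT (row 1)
            System.out.println(decide("v1", UNKNOWN, true, null));               // OUT_OF_SYNC (row 2)
            System.out.println(decide("v2", "v2", true, RepoState.OUT_OF_SYNC)); // OUT_OF_SYNC (row 3)
            System.out.println(decide("v2", "v2", false, null));                 // CURRENT (row 5)
        }
    }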
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index a0c0db1..61f44cc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -492,12 +492,12 @@
   void deleteAllServiceConfigs() throws AmbariException {
     long clusterId = getClusterId();
     ServiceConfigEntity lastServiceConfigEntity = serviceConfigDAO.findMaxVersion(clusterId, getName());
-
     // de-select every configuration from the service
     if (lastServiceConfigEntity != null) {
       for (ClusterConfigEntity serviceConfigEntity : lastServiceConfigEntity.getClusterConfigEntities()) {
         LOG.info("Disabling configuration {}", serviceConfigEntity);
         serviceConfigEntity.setSelected(false);
+        serviceConfigEntity.setServiceDeleted(true);
         clusterDAO.merge(serviceConfigEntity);
       }
     }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 24c2d94..6c58585 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -36,6 +36,7 @@
 import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.utils.VersionUtils;
 
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Multimaps;
@@ -202,6 +203,7 @@
   public ExtensionInfo getExtensionByService(String serviceName) {
     Collection<ExtensionInfo> extensions = getExtensions();
     for (ExtensionInfo extension : extensions) {
+      Collection<ServiceInfo> services = extension.getServices();
       for (ServiceInfo service : services) {
         if (service.getName().equals(serviceName))
           return extension;
@@ -211,6 +213,24 @@
     return null;
   }
 
+  public void addExtension(ExtensionInfo extension) {
+    Collection<ExtensionInfo> extensions = getExtensions();
+    extensions.add(extension);
+    Collection<ServiceInfo> services = getServices();
+    for (ServiceInfo service : extension.getServices()) {
+      services.add(service);
+    }
+  }
+
+  public void removeExtension(ExtensionInfo extension) {
+    Collection<ExtensionInfo> extensions = getExtensions();
+    extensions.remove(extension);
+    Collection<ServiceInfo> services = getServices();
+    for (ServiceInfo service : extension.getServices()) {
+      services.remove(service);
+    }
+  }
+
   public List<PropertyInfo> getProperties() {
     if (properties == null) properties = new ArrayList<>();
     return properties;
@@ -459,9 +479,10 @@
 
   @Override
   public int compareTo(StackInfo o) {
-    String myId = name + "-" + version;
-    String oId = o.name + "-" + o.version;
-    return myId.compareTo(oId);
+    if (name.equals(o.name)) {
+      return VersionUtils.compareVersions(version, o.version);
+    }
+    return name.compareTo(o.name);
   }
 
   //todo: ensure that required properties are never modified...
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 71fb5d9..97f5003 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -575,6 +575,15 @@
   }
 
   /**
+   * @return the set of supported services, or an empty set if ALL services
+   * are supported
+   */
+  @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
+  public Set<String> getSupportedServices() {
+    return Collections.unmodifiableSet(m_supported);
+  }
+
+  /**
    * Gets if a service is supported.  If there are no services marked for the context,
    * then ALL services are supported
    * @param serviceName the service name to check.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 5a8c25e..92e01c2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -98,7 +98,7 @@
   /**
    * Enum used to define placeholder text for replacement
    */
-  private static enum Placeholder {
+  private enum Placeholder {
     /**
      * No placeholder defined
      */
@@ -154,7 +154,7 @@
     DIRECTION_VERB_PROPER("direction.verb.proper");
 
     private String pattern;
-    private Placeholder(String key) {
+    Placeholder(String key) {
       pattern = "{{" + key + "}}";
     }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
index f5387a0..4137115 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ValueAttributesInfo.java
@@ -345,7 +345,7 @@
       return false;
     if (overridable != null ? !overridable.equals(that.overridable) : that.overridable != null)
       return false;
-    if (hidden != null ? !hidden.equals(that.overridable) : that.hidden != null)
+    if (hidden != null ? !hidden.equals(that.hidden) : that.hidden != null)
       return false;
     if (showPropertyName != null ? !showPropertyName.equals(that.showPropertyName) : that.showPropertyName != null)
       return false;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java b/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java
index 6d00813..e79d0df 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java
@@ -26,7 +26,7 @@
    * Get the Action ID for the action
    * @return ActionId
    */
-  public ActionId getId();
+  ActionId getId();
 
   // TODO requires some form of ActionType to ensure only one running
   // action per action type
@@ -38,38 +38,38 @@
    * Get Start Time of the action
    * @return Start time as a unix timestamp
    */
-  public long getStartTime();
+  long getStartTime();
 
   /**
    * Get the last update time of the Action when its progress status
    * was updated
    * @return Last Update Time as a unix timestamp
    */
-  public long getLastUpdateTime();
+  long getLastUpdateTime();
 
   /**
    * Time when the Action completed
    * @return Completion Time as a unix timestamp
    */
-  public long getCompletionTime();
+  long getCompletionTime();
 
   /**
    * Get the current state of the Action
    * @return ActionState
    */
-  public ActionState getState();
+  ActionState getState();
 
   /**
    * Set the State of the Action
    * @param state ActionState
    */
-  public void setState(ActionState state);
+  void setState(ActionState state);
 
   /**
    * Send a ActionEvent to the Action's StateMachine
    * @param event ActionEvent
    * @throws InvalidStateTransitionException
    */
-  public void handleEvent(ActionEvent event)
+  void handleEvent(ActionEvent event)
       throws InvalidStateTransitionException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
index 616bcdb..5896819 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
@@ -447,8 +447,6 @@
    *
    * @param clusterId
    *          the ID of the cluster.
-   * @param hosts
-   *          the hosts to push {@link AlertDefinitionCommand}s for.
    */
   public void enqueueAgentCommands(long clusterId) {
     String clusterName = null;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index b7cc4cd..228cf79 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -124,6 +124,7 @@
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
@@ -141,6 +142,7 @@
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.scheduler.RequestExecution;
 import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
@@ -334,6 +336,13 @@
    */
   private Map<String, String> m_clusterPropertyCache = new ConcurrentHashMap<>();
 
+  /**
+   * A simple cache of the effective cluster version during an upgrade. Since
+   * calculating it requires walking the upgrade and is neither quick nor
+   * clean, the result is cached per upgrade ID.
+   */
+  private final Map<Long, String> upgradeEffectiveVersionCache = new ConcurrentHashMap<>();
+
   @Inject
   public ClusterImpl(@Assisted ClusterEntity clusterEntity, Injector injector,
       AmbariEventPublisher eventPublisher)
@@ -1026,22 +1035,31 @@
       return getCurrentClusterVersion();
     }
 
-    String effectiveVersion = null;
-    switch (upgradeEntity.getUpgradeType()) {
-      case NON_ROLLING:
-        if (upgradeEntity.getDirection() == Direction.UPGRADE) {
-          boolean pastChangingStack = isNonRollingUpgradePastUpgradingStack(upgradeEntity);
-          effectiveVersion = pastChangingStack ? upgradeEntity.getToVersion() : upgradeEntity.getFromVersion();
-        } else {
-          // Should be the lower value during a Downgrade.
+    // see if this is in the cache first, and only walk the upgrade if it's not
+    Long upgradeId = upgradeEntity.getId();
+    String effectiveVersion = upgradeEffectiveVersionCache.get(upgradeId);
+    if (null == effectiveVersion) {
+      switch (upgradeEntity.getUpgradeType()) {
+        case NON_ROLLING:
+          if (upgradeEntity.getDirection() == Direction.UPGRADE) {
+            boolean pastChangingStack = isNonRollingUpgradePastUpgradingStack(upgradeEntity);
+            effectiveVersion = pastChangingStack ? upgradeEntity.getToVersion()
+                : upgradeEntity.getFromVersion();
+          } else {
+            // Should be the lower value during a Downgrade.
+            effectiveVersion = upgradeEntity.getToVersion();
+          }
+          break;
+        case ROLLING:
+        default:
+          // Version will be higher on upgrade and lower on downgrade
+          // directions.
           effectiveVersion = upgradeEntity.getToVersion();
-        }
-        break;
-      case ROLLING:
-      default:
-        // Version will be higher on upgrade and lower on downgrade directions.
-        effectiveVersion = upgradeEntity.getToVersion();
-        break;
+          break;
+      }
+
+      // cache for later use
+      upgradeEffectiveVersionCache.put(upgradeId, effectiveVersion);
     }
 
     if (effectiveVersion == null) {
@@ -1085,6 +1103,14 @@
   }
 
   /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void invalidateUpgradeEffectiveVersion() {
+    upgradeEffectiveVersionCache.clear();
+  }
+
+  /**
    * Get all of the ClusterVersionEntity objects for the cluster.
    * @return
    */
@@ -1172,99 +1198,115 @@
    * {@inheritDoc}
    */
   @Override
-  public void transitionHosts(ClusterVersionEntity sourceClusterVersion,
-      RepositoryVersionState state) throws AmbariException {
+  @Transactional
+  public List<Host> transitionHostsToInstalling(ClusterVersionEntity sourceClusterVersion,
+      RepositoryVersionEntity repoVersionEntity, VersionDefinitionXml versionDefinitionXml,
+      boolean forceInstalled) throws AmbariException {
 
     if (sourceClusterVersion == null) {
       throw new AmbariException("Could not find current stack version of cluster " + getClusterName());
     }
 
-    if (state != sourceClusterVersion.getState()) {
-      throw new AmbariException("Unable to transition cluster hosts into " + state
-          + ". The only valid state is " + sourceClusterVersion.getState());
+    if (RepositoryVersionState.INSTALLING != sourceClusterVersion.getState()) {
+      throw new AmbariException(
+          "Unable to transition cluster hosts into " + RepositoryVersionState.INSTALLING
+          + ". The cluster version must be " + RepositoryVersionState.INSTALLING
+          + " but was " + sourceClusterVersion.getState());
     }
 
-    Map<String, Host> hosts = clusters.getHostsForCluster(getClusterName());
-    Set<String> existingHostsWithClusterStackAndVersion = new HashSet<>();
-    HashMap<String, HostVersionEntity> existingHostStackVersions = new HashMap<>();
+    // the hosts to return so that INSTALL commands can be generated for them
+    final List<Host> hostsRequiringInstallation;
 
     clusterGlobalLock.writeLock().lock();
     try {
-      StackEntity repoVersionStackEntity = sourceClusterVersion.getRepositoryVersion().getStack();
-      StackId repoVersionStackId = new StackId(repoVersionStackEntity);
 
-      List<HostVersionEntity> existingHostVersionEntities = hostVersionDAO.findByClusterStackAndVersion(
-          getClusterName(), repoVersionStackId,
-          sourceClusterVersion.getRepositoryVersion().getVersion());
+      // get this once for easy lookup later
+      Map<String, Host> hosts = clusters.getHostsForCluster(getClusterName());
+      hostsRequiringInstallation = new ArrayList<>(hosts.size());
 
-      // for each host that already has a stack and version, keep track of them
-      for (HostVersionEntity entity : existingHostVersionEntities) {
-        String hostName = entity.getHostName();
-        existingHostsWithClusterStackAndVersion.add(hostName);
-        existingHostStackVersions.put(hostName, entity);
+      // for every host, either create or update the host version to the right
+      // state, starting with INSTALLING
+      Collection<HostEntity> hostEntities = getClusterEntity().getHostEntities();
+
+      for (HostEntity hostEntity : hostEntities) {
+        // start with INSTALLING
+        RepositoryVersionState state = RepositoryVersionState.INSTALLING;
+        if (forceInstalled) {
+          state = RepositoryVersionState.INSTALLED;
+        }
+
+        // the repository is NOT_REQUIRED on hosts with no components that advertise versions
+        Host host = hosts.get(hostEntity.getHostName());
+        if (!host.hasComponentsAdvertisingVersions(desiredStackVersion)) {
+          state = RepositoryVersionState.NOT_REQUIRED;
+        }
+
+        // if the repository is still required, check against the repo type
+        if (state != RepositoryVersionState.NOT_REQUIRED) {
+          if (repoVersionEntity.getType() != RepositoryType.STANDARD) {
+            // does the host get a different repo state based on the VDF and
+            // repository type
+            boolean hostRequiresRepository = false;
+            Set<String> servicesInRepository = versionDefinitionXml.getAvailableServiceNames();
+
+            List<ServiceComponentHost> schs = getServiceComponentHosts(hostEntity.getHostName());
+            for (ServiceComponentHost serviceComponentHost : schs) {
+              String serviceName = serviceComponentHost.getServiceName();
+              if (servicesInRepository.contains(serviceName)) {
+                hostRequiresRepository = true;
+                break;
+              }
+            }
+
+            // no service from the repository runs on this host, so it is NOT_REQUIRED
+            if (!hostRequiresRepository) {
+              state = RepositoryVersionState.NOT_REQUIRED;
+            }
+          }
+        }
+
+        // finally, if the repository is still required, check maintenance mode
+        if (state != RepositoryVersionState.NOT_REQUIRED) {
+          if (host.getMaintenanceState(clusterId) != MaintenanceState.OFF) {
+            state = RepositoryVersionState.OUT_OF_SYNC;
+          }
+        }
+
+        // now that the correct state is determined for the host version,
+        // either update or create it
+        HostVersionEntity hostVersionEntity = null;
+        Collection<HostVersionEntity> hostVersions = hostEntity.getHostVersionEntities();
+        for (HostVersionEntity existingHostVersion : hostVersions) {
+          if (existingHostVersion.getRepositoryVersion().getId() == repoVersionEntity.getId()) {
+            hostVersionEntity = existingHostVersion;
+            break;
+          }
+        }
+
+        if (null == hostVersionEntity) {
+          hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, state);
+          hostVersionDAO.create(hostVersionEntity);
+
+          // bi-directional association update
+          hostVersions.add(hostVersionEntity);
+          hostDAO.merge(hostEntity);
+        } else {
+          hostVersionEntity.setState(state);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
+        }
+
+        LOG.info("Created host version for {}, state={}, repository version={} (repo_id={})",
+            hostVersionEntity.getHostName(), hostVersionEntity.getState(),
+            repoVersionEntity.getVersion(), repoVersionEntity.getId());
+
+        if (state == RepositoryVersionState.INSTALLING) {
+          hostsRequiringInstallation.add(host);
+        }
       }
-
-      // find any hosts that do not have the stack/repo version already
-      Sets.SetView<String> hostsMissingRepoVersion = Sets.difference(
-        hosts.keySet(), existingHostsWithClusterStackAndVersion);
-
-      createOrUpdateHostVersionToState(sourceClusterVersion, hosts,
-          existingHostStackVersions, hostsMissingRepoVersion, state);
     } finally {
       clusterGlobalLock.writeLock().unlock();
     }
-  }
 
-  /**
-   * Moved out to a separate method due to performance reasons
-   * Iterates over all hosts and creates or transitions existing host versions
-   * to a given state. If host version for desired stack/version does not exist,
-   * host version is created and initialized to a given state. Otherwise, existing
-   * host version state is updated
-   * Hosts in maintenance mode are auto skipped.
-   *
-   * @param sourceClusterVersion cluster version to be queried for a stack
-   *                             name/version info when creating a new host version
-   * @param hosts list of all hosts
-   * @param existingHostStackVersions map of existing host versions to be updated
-   * @param hostsMissingRepoVersion set of hostnames of hosts that have no desired host version
-   * @param newState target host version state for transition
-   */
-  @Transactional
-  void createOrUpdateHostVersionToState(ClusterVersionEntity sourceClusterVersion,
-      Map<String, Host> hosts, HashMap<String, HostVersionEntity> existingHostStackVersions,
-      Sets.SetView<String> hostsMissingRepoVersion, RepositoryVersionState newState) {
-
-    for (String hostname : hosts.keySet()) {
-      // start off with the requested new state for each host
-      RepositoryVersionState repositoryVersionState = newState;
-
-      // if the host is in maintenance mode, that's an explicit marker which
-      // indicates that it should not be transitioned to INSTALLING; instead
-      // they will be transitioned to OUT_OF_SYNC
-      Host host = hosts.get(hostname);
-      if (host.getMaintenanceState(getClusterId()) != MaintenanceState.OFF) {
-        repositoryVersionState = RepositoryVersionState.OUT_OF_SYNC;
-      }
-
-      if (hostsMissingRepoVersion.contains(hostname)) {
-        // Create new host stack version
-        HostEntity hostEntity = hostDAO.findByName(hostname);
-        HostVersionEntity hostVersionEntity = new HostVersionEntity(hostEntity,
-            sourceClusterVersion.getRepositoryVersion(), repositoryVersionState);
-
-        LOG.info("Creating host version for {}, state={}, repo={} (repo_id={})",
-            hostVersionEntity.getHostName(), hostVersionEntity.getState(),
-            hostVersionEntity.getRepositoryVersion().getVersion(), hostVersionEntity.getRepositoryVersion().getId());
-
-        hostVersionDAO.create(hostVersionEntity);
-      } else {
-        // Update existing host stack version
-        HostVersionEntity hostVersionEntity = existingHostStackVersions.get(hostname);
-        hostVersionEntity.setState(repositoryVersionState);
-        hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
-      }
-    }
+    return hostsRequiringInstallation;
   }
 
   /**
@@ -1597,11 +1639,11 @@
   }
 
   @Override
-  public void createClusterVersion(StackId stackId, String version,
+  public ClusterVersionEntity createClusterVersion(StackId stackId, String version,
       String userName, RepositoryVersionState state) throws AmbariException {
     clusterGlobalLock.writeLock().lock();
     try {
-      createClusterVersionInternal(stackId, version, userName, state);
+      return createClusterVersionInternal(stackId, version, userName, state);
     } finally {
       clusterGlobalLock.writeLock().unlock();
     }
@@ -1612,7 +1654,7 @@
    *
    * This method is intended to be called only when cluster lock is already acquired.
    */
-  private void createClusterVersionInternal(StackId stackId, String version,
+  private ClusterVersionEntity createClusterVersionInternal(StackId stackId, String version,
       String userName, RepositoryVersionState state) throws AmbariException {
     if (!ALLOWED_REPOSITORY_STATES.contains(state)) {
       throw new AmbariException("The allowed state for a new cluster version must be within " + ALLOWED_REPOSITORY_STATES);
@@ -1630,9 +1672,8 @@
     RepositoryVersionEntity repositoryVersionEntity = repositoryVersionDAO.findByStackAndVersion(
       stackId, version);
     if (repositoryVersionEntity == null) {
-      LOG.warn("Could not find repository version for stack=" + stackId
-          + ", version=" + version);
-      return;
+      throw new AmbariException(
+          "Unable to find repository version for stack " + stackId + " and version " + version);
     }
 
     ClusterEntity clusterEntity = getClusterEntity();
@@ -1642,6 +1683,8 @@
     clusterVersionDAO.create(clusterVersionEntity);
     clusterEntity.getClusterVersionEntities().add(clusterVersionEntity);
     clusterEntity = clusterDAO.merge(clusterEntity);
+
+    return clusterVersionEntity;
   }
 
   /**
@@ -3151,7 +3194,7 @@
    * should make sure the cluster global write lock is acquired.
    *
    * @param stackId
-   * @see Cluster#getClusterGlobalLock()
+   * @see #clusterGlobalLock
    */
   @Transactional
   void removeAllConfigsForStack(StackId stackId) {
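
The effective-version cache added to ClusterImpl above is a standard compute-once-then-invalidate pattern over a ConcurrentHashMap. A generic sketch of the same idea (hypothetical class and field names, with computeIfAbsent in place of the hand-rolled get/put):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    public class EffectiveVersionCacheSketch {

        // upgrade id -> effective version, mirroring upgradeEffectiveVersionCache
        private final Map<Long, String> cache = new ConcurrentHashMap<>();

        // Stand-in for the expensive walk of the upgrade's state.
        private final Function<Long, String> walkUpgrade;

        EffectiveVersionCacheSketch(Function<Long, String> walkUpgrade) {
            this.walkUpgrade = walkUpgrade;
        }

        String effectiveVersion(long upgradeId) {
            // computeIfAbsent closes the check-then-put race in the hand-rolled
            // variant while keeping the same walk-once behavior.
            return cache.computeIfAbsent(upgradeId, walkUpgrade);
        }

        void invalidate() {
            // Analogous to invalidateUpgradeEffectiveVersion(): drop everything
            // and let the next lookup recompute.
            cache.clear();
        }

        public static void main(String[] args) {
            EffectiveVersionCacheSketch sketch =
                new EffectiveVersionCacheSketch(id -> "2.5.0-" + id);
            System.out.println(sketch.effectiveVersion(1L)); // computed
            System.out.println(sketch.effectiveVersion(1L)); // served from cache
            sketch.invalidate();                             // next lookup recomputes
        }
    }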
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 2bc1e68..99bc781 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -811,10 +811,6 @@
       }
     }
 
-
-    entity.setHostRoleCommandEntities(null);
-    hostRoleCommandDAO.removeByHostId(entity.getHostId());
-
     entity.setHostStateEntity(null);
     hostStateDAO.removeByHostId(entity.getHostId());
     hostConfigMappingDAO.removeByHostId(entity.getHostId());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 5a9c574..60780dd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -35,76 +35,76 @@
    * Primary key of config group
    * @return
    */
-  public Long getId();
+  Long getId();
 
   /**
    * Unique config group name
    * @return
    */
-  public String getName();
+  String getName();
 
   /**
    * Update Group name
    * @param name
    */
-  public void setName(String name);
+  void setName(String name);
 
   /**
    * Cluster name to which config group belongs
    * @return
    */
-  public String getClusterName();
+  String getClusterName();
 
   /**
    * Tag which associates config group to service
    * @return
    */
-  public String getTag();
+  String getTag();
 
   /**
    * Update tag
    * @param tag
    */
-  public void setTag(String tag);
+  void setTag(String tag);
 
   /**
    * Config group description
    * @return
    */
-  public String getDescription();
+  String getDescription();
 
   /**
    * Update description
    * @param description
    */
-  public void setDescription(String description);
+  void setDescription(String description);
 
   /**
    * Gets an unmodifiable list of {@link Host}s.
    *
    * @return
    */
-  public Map<Long, Host> getHosts();
+  Map<Long, Host> getHosts();
 
   /**
    * Gets an unmodifiable map of {@link Config}s.
    *
    * @return
    */
-  public Map<String, Config> getConfigurations();
+  Map<String, Config> getConfigurations();
 
   /**
    * Delete config group and the related host and config mapping
    * entities from the persistence store
    */
-  public void delete();
+  void delete();
 
   /**
    * Add host to Config group
    * @param host
    * @throws AmbariException
    */
-  public void addHost(Host host) throws AmbariException;
+  void addHost(Host host) throws AmbariException;
 
   /**
    * Return @ConfigGroupResponse for the config group
@@ -112,24 +112,24 @@
    * @return @ConfigGroupResponse
    * @throws AmbariException
    */
-  public ConfigGroupResponse convertToResponse() throws AmbariException;
+  ConfigGroupResponse convertToResponse() throws AmbariException;
 
   /**
    * Reassign the set of hosts associated with this config group
    * @param hosts
    */
-  public void setHosts(Map<Long, Host> hosts);
+  void setHosts(Map<Long, Host> hosts);
 
   /**
    * Reassign the set of configs associated with this config group
    * @param configs
    */
-  public void setConfigurations(Map<String, Config> configs);
+  void setConfigurations(Map<String, Config> configs);
 
   /**
    * Remove host mapping
    */
-  public void removeHost(Long hostId) throws AmbariException;
+  void removeHost(Long hostId) throws AmbariException;
 
   /**
    * Name of service which config group is wired to
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 906d948..2209dc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -29,26 +29,13 @@
 public interface ConfigGroupFactory {
   /**
    * Creates and saves a new {@link ConfigGroup}.
-   *
-   * @param cluster
-   * @param name
-   * @param tag
-   * @param description
-   * @param configs
-   * @param hosts
-   * @param serviceName
-   * @return
    */
   ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
       @Assisted("tag") String tag, @Assisted("description") String description,
       @Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
 
   /**
-   * Instantiates a {@link ConfigGroup} fron an existing, persisted entity.
-   *
-   * @param cluster
-   * @param entity
-   * @return
+   * Instantiates a {@link ConfigGroup} from an existing, persisted entity.
    */
   ConfigGroup createExisting(Cluster cluster, ConfigGroupEntity entity);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index 89a03aa..a74e2a2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -258,7 +258,6 @@
 
   /**
    * Helper method to recreate configs mapping
-   * @param configs
    */
   @Override
   public void setConfigurations(Map<String, Config> configurations) {
@@ -349,10 +348,6 @@
 
   /**
    * Replaces all existing host mappings with the new collection of hosts.
-   *
-   * @param the
-   *          new hosts
-   * @throws Exception
    */
   @Transactional
   void replaceHostMappings(Map<Long, Host> hosts) {
@@ -370,9 +365,6 @@
 
   /**
    * Adds the collection of hosts to the configuration group.
-   *
-   * @param hostEntity
-   * @param configGroupEntity
    */
   @Transactional
   ConfigGroupEntity persistHostMapping(Collection<Host> hosts,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java
index b4c688f..5cf6308 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java
@@ -34,6 +34,6 @@
    *                state may change.
    * @param event causal event
    */
-  public STATE transition(OPERAND operand, EVENT event);
+  STATE transition(OPERAND operand, EVENT event);
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java
index c802e2a..a8ca919 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java
@@ -30,6 +30,6 @@
    *                state may change.
    * @param event causal event
    */
-  public void transition(OPERAND operand, EVENT event);
+  void transition(OPERAND operand, EVENT event);
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java
index e8e2813..60201ef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java
@@ -21,8 +21,8 @@
 public interface StateMachine
                  <STATE extends Enum<STATE>,
                   EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
-  public STATE getCurrentState();
-  public void setCurrentState(STATE state);
-  public STATE doTransition(EVENTTYPE eventType, EVENT event)
+  STATE getCurrentState();
+  void setCurrentState(STATE state);
+  STATE doTransition(EVENTTYPE eventType, EVENT event)
         throws InvalidStateTransitionException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
index cc57f2c..1a17a25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
@@ -273,9 +273,9 @@
 
   /**
    * Effect a transition due to the effecting stimulus.
-   * @param state current state
+   * @param oldState current state
    * @param eventType trigger to initiate the transition
-   * @param cause causal eventType context
+   * @param event causal eventType context
    * @return transitioned state
    */
   private STATE doTransition
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
index 9b1fd6b..ae5540e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
@@ -222,7 +222,7 @@
     private final String descriptorName;
     private final String descriptorPluralName;
 
-    private Type(String descriptorName, String descriptorPluralName) {
+    Type(String descriptorName, String descriptorPluralName) {
       this.descriptorName = descriptorName;
       this.descriptorPluralName = descriptorPluralName;
     }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
index 69399d6..01e30cb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java
@@ -143,6 +143,27 @@
   }
 
   /**
+   * Gets the set of services that are included in this XML
+   * @return an empty set for STANDARD repositories, or a non-empty set for PATCH type.
+   */
+  public Set<String> getAvailableServiceNames() {
+    if (availableServices.isEmpty()) {
+      return Collections.emptySet();
+    } else {
+      Set<String> serviceNames = new HashSet<>();
+
+      Map<String, ManifestService> manifest = buildManifest();
+
+      for (AvailableServiceReference ref : availableServices) {
+        ManifestService ms = manifest.get(ref.serviceIdReference);
+        serviceNames.add(ms.serviceName);
+      }
+
+      return serviceNames;
+    }
+  }
+
+  /**
    * Gets if the version definition was built as the default for a stack
    * @return {@code true} if default for a stack
    */
@@ -353,7 +374,6 @@
 
   /**
    * Builds a Version Definition that is the default for the stack
-   * @param stack
    * @return the version definition
    */
   public static VersionDefinitionXml build(StackInfo stackInfo) {
@@ -441,4 +461,4 @@
     }
   }
 
-}
\ No newline at end of file
+}
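
A caller-side sketch of how this set is intended to gate per-host work for non-STANDARD repositories, as in the transitionHostsToInstalling change above (hypothetical names; the real caller walks ServiceComponentHost objects):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class PatchRepoGateSketch {

        /**
         * A host needs a PATCH repository only when it runs at least one
         * service named in the VDF; an empty set means a STANDARD repository,
         * which applies to every versionable host.
         */
        static boolean hostNeedsRepository(Set<String> servicesInRepo,
                                           List<String> servicesOnHost) {
            if (servicesInRepo.isEmpty()) {
                return true; // STANDARD repository: all hosts need it
            }
            for (String service : servicesOnHost) {
                if (servicesInRepo.contains(service)) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Set<String> patch = new HashSet<>(Arrays.asList("HDFS", "YARN"));
            System.out.println(hostNeedsRepository(patch, Arrays.asList("ZOOKEEPER")));    // false
            System.out.println(hostNeedsRepository(patch, Arrays.asList("HDFS", "HIVE"))); // true
        }
    }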
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java b/ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java
index 8a325f4..d979cc4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/scheduler/RequestExecution.java
@@ -28,137 +28,137 @@
    * Primary key of Request Execution
    * @return
    */
-  public Long getId();
+  Long getId();
 
   /**
    * Cluster name to which request schedule belongs
    * @return
    */
-  public String getClusterName();
+  String getClusterName();
 
   /**
    * Get the batch of requests along with batch settings
    * @return
    */
-  public Batch getBatch();
+  Batch getBatch();
 
   /**
    * Set batch of requests and batch settings
    */
-  public void setBatch(Batch batch);
+  void setBatch(Batch batch);
 
   /**
    * Get schedule for the execution
    * @return
    */
-  public Schedule getSchedule();
+  Schedule getSchedule();
 
   /**
    * Set schedule for the execution
    */
-  public void setSchedule(Schedule schedule);
+  void setSchedule(Schedule schedule);
 
   /**
    * Get @RequestScheduleResponse for this Request Execution
    * @return
    */
-  public RequestScheduleResponse convertToResponse();
+  RequestScheduleResponse convertToResponse();
 
   /**
    * Persist the Request Execution and schedule
    */
-  public void persist();
+  void persist();
 
   /**
    * Refresh entity from DB.
    */
-  public void refresh();
+  void refresh();
 
   /**
    * Delete Request Schedule entity
    */
-  public void delete();
+  void delete();
 
   /**
    * Get status of schedule
    */
-  public String getStatus();
+  String getStatus();
 
   /**
    * Set request execution description
    */
-  public void setDescription(String description);
+  void setDescription(String description);
 
   /**
    * Get description of the request execution
    */
-  public String getDescription();
+  String getDescription();
 
   /**
    * Set status of the schedule
    */
-  public void setStatus(Status status);
+  void setStatus(Status status);
 
   /**
    * Set datetime:status of last request that was executed
    */
-  public void setLastExecutionStatus(String status);
+  void setLastExecutionStatus(String status);
 
   /**
    * Set authenticated user
    */
-  public void setAuthenticatedUserId(Integer username);
+  void setAuthenticatedUserId(Integer username);
 
   /**
    * Set create username
    */
-  public void setCreateUser(String username);
+  void setCreateUser(String username);
 
   /**
    * Set create username
    */
-  public void setUpdateUser(String username);
+  void setUpdateUser(String username);
 
   /**
    * Get created time
    */
-  public String getCreateTime();
+  String getCreateTime();
 
   /**
    * Get updated time
    */
-  public String getUpdateTime();
+  String getUpdateTime();
 
   /**
    * Get authenticated user
    */
-  public Integer getAuthenticatedUserId();
+  Integer getAuthenticatedUserId();
 
   /**
    * Get create user
    */
-  public String getCreateUser();
+  String getCreateUser();
 
   /**
    * Get update user
    */
-  public String getUpdateUser();
+  String getUpdateUser();
 
   /**
    * Get status of the last batch of requests
    * @return
    */
-  public String getLastExecutionStatus();
+  String getLastExecutionStatus();
 
   /**
    * Get response with request body
    */
-  public RequestScheduleResponse convertToResponseWithBody();
+  RequestScheduleResponse convertToResponseWithBody();
 
   /**
    * Get the request body for a batch request
    */
-  public String getRequestBody(Long batchId);
+  String getRequestBody(Long batchId);
 
   /**
    * Get batch request with specified order id
@@ -176,12 +176,12 @@
   /**
    * Update status and save RequestExecution
    */
-  public void updateStatus(Status status);
+  void updateStatus(Status status);
 
   /**
    * Status of the Request execution
    */
-  public enum Status {
+  enum Status {
     SCHEDULED,
     COMPLETED,
     DISABLED
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
index 59ec15b..ec6b074 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
@@ -281,7 +281,7 @@
    * @param streamProvider
    *          the {@link StreamProvider} to use to read from the remote
    *          endpoint.
-   * @param jmxUrl
+   * @param url
    *          the URL to read from
    *
    * @see #getCachedJMXMetric(String)
@@ -399,7 +399,7 @@
      *          the URLs which are currently waiting to be processed. This
      *          method will remove the specified URL from this {@link Set} when
      *          it completes (successful or not).
-     * @param m_ttlUrlCache
+     * @param ttlUrlCache
      *          an evicting cache which is used to determine if a request for a
      *          metric is too soon after the last request, or {@code null} if
      *          requests can be made sequentially without any separation.
@@ -501,7 +501,7 @@
      *
      * @param throwable
      * @param url
-     * @return the key, such as {@value IOException-http://www.server.com/jmx}.
+     * @return the key, such as <code>IOException-http://www.server.com/jmx</code>.
      */
     private String buildCacheKey(Throwable throwable, String url) {
       if (null == throwable || null == url) {
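The documented key format suggests a straightforward construction; a hedged sketch of the likely remainder of buildCacheKey (the body past the null guard is not shown in this hunk, so this is an assumption):

  // Assumed continuation, consistent with the IOException-http://... format above:
  return throwable.getClass().getSimpleName() + "-" + url;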
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
index 77a7250..4480ef7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
@@ -77,6 +77,9 @@
   @XmlTransient
   private boolean valid = true;
 
+  @XmlElement(name="auto-link")
+  private boolean autoLink = false;
+
   /**
    *
    * @return valid xml flag
@@ -201,4 +204,12 @@
     }
   }
 
+  public boolean isAutoLink() {
+    return autoLink;
+  }
+
+  public void setAutoLink(boolean autoLink) {
+    this.autoLink = autoLink;
+  }
+
 }
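For context, the JAXB binding above implies the extension's metainfo.xml carries the flag directly; a hedged illustration of the assumed element:

  // Assumed metainfo.xml fragment (illustrative):
  //   <extension>
  //     <auto-link>true</auto-link>
  //   </extension>
  // isAutoLink() would then return true for this extension.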
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index 5cebade..6adb613 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -25,7 +25,6 @@
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 
 import javax.xml.bind.annotation.XmlAccessType;
@@ -47,9 +46,6 @@
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonPrimitive;
 
 /**
  * Used to represent cluster-based operations.
@@ -264,6 +260,7 @@
         return null;
       }
 
+      // !!! FUTURE: check for component
 
       HostsType hosts = ctx.getResolver().getMasterAndHosts(service, component);
 
@@ -317,32 +314,6 @@
   }
 
   /**
-   * Populates the manual task, mt, with information about the list of hosts.
-   * @param mt Manual Task
-   * @param hostToComponents Map from host name to list of components
-   */
-  private void fillHostDetails(ManualTask mt, Map<String, List<String>> hostToComponents) {
-    JsonArray arr = new JsonArray();
-    for (Entry<String, List<String>> entry : hostToComponents.entrySet()) {
-      JsonObject hostObj = new JsonObject();
-      hostObj.addProperty("host", entry.getKey());
-
-      JsonArray componentArr = new JsonArray();
-      for (String comp : entry.getValue()) {
-        componentArr.add(new JsonPrimitive(comp));
-      }
-      hostObj.add("components", componentArr);
-
-      arr.add(hostObj);
-    }
-
-    JsonObject obj = new JsonObject();
-    obj.add("unhealthy", arr);
-
-    mt.structuredOut = obj.toString();
-  }
-
-  /**
    * Attempts to merge the given cluster groupings.  This merges the execute stages
    * in an order specific manner.
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
index 5c0fba7..9d65f26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
@@ -247,8 +247,7 @@
         }
 
         }catch(Exception e){
-          String message = "";
-          message = "ConfigUpgradeChangeDefinition: getRegexReplacements : Error while fetching config properties ";
+          String message = "getRegexReplacements : Error while fetching config properties : key - " + regexReplaceObj.key + " find - " + regexReplaceObj.find;
           LOG.error(message, e);
 
         }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
index cd17a70..99ed0aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
@@ -129,6 +129,7 @@
       for (TaskBucket bucket : buckets) {
         // The TaskWrappers take into account if a task is meant to run on all, any, or master.
         // A TaskWrapper may contain multiple tasks, but typically only one, and they all run on the same set of hosts.
+        // Generate a task wrapper for every task in the bucket
         List<TaskWrapper> preTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, bucket.tasks, params);
         List<List<TaskWrapper>> organizedTasks = organizeTaskWrappersBySyncRules(preTasks);
         for (List<TaskWrapper> tasks : organizedTasks) {
@@ -219,7 +220,6 @@
         int batchNum = 0;
         for (Set<String> hostSubset : hostSets) {
           batchNum++;
-          TaskWrapper expandedTW = new TaskWrapper(tw.getService(), tw.getComponent(), hostSubset, tw.getParams(), tw.getTasks());
 
           String stageText = getStageText(verb, ctx.getComponentDisplay(service, pc.name), hostSubset, batchNum, numBatchesNeeded);
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
index 97864a6..d19406e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
@@ -287,7 +287,6 @@
 
     /**
      * @param upgradeContext  the context
-     * @param hosts           the list of hostnames
      * @return  the wrappers for a host
      */
     private List<StageWrapper> buildServiceChecks(UpgradeContext upgradeContext, List<String> serviceChecks) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
index aac8935..81f4e0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapper.java
@@ -25,6 +25,12 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.base.Objects;
 import com.google.gson.Gson;
 
@@ -33,6 +39,8 @@
  */
 public class StageWrapper {
 
+  private static final Logger LOG = LoggerFactory.getLogger(StageWrapper.class);
+
   private static Gson gson = new Gson();
   private String text;
   private Type type;
@@ -163,4 +171,61 @@
         .add("text",text)
         .omitNullValues().toString();
   }
+
+  /**
+   * Gets the maximum timeout for any task that this {@code StageWrapper} encapsulates.  TaskWrappers
+   * are homogeneous across the stage, but timeouts are defined in Upgrade Packs
+   * at the task, so each one should be checked individually.
+   *
+   * <p>
+   * WARNING:  This method relies on incorrect assumptions about {@link StageWrapper}s and the {@link TaskWrapper}s
+   * that are contained in them.  Orchestration is currently forcing a StageWrapper to have only one TaskWrapper,
+   * even though they could have many per the code.
+   *
+   * In addition, a TaskWrapper should have a one-to-one reference with the Task it contains.  That will be
+   * fixed in a future release.
+   * </p>
+   *
+   * @param configuration the configuration instance.  StageWrappers are not injectable, so pass
+   *                      this in.
+   * @return the maximum timeout, or the default agent execution timeout if none are found.  Never {@code null}.
+   */
+  public Short getMaxTimeout(Configuration configuration) {
+
+    Set<String> timeoutKeys = new HashSet<>();
+
+    // !!! FIXME a TaskWrapper should have only one task.
+    for (TaskWrapper wrapper : tasks) {
+      timeoutKeys.addAll(wrapper.getTimeoutKeys());
+    }
+
+    Short defaultTimeout = Short.valueOf(configuration.getDefaultAgentTaskTimeout(false));
+
+    if (CollectionUtils.isEmpty(timeoutKeys)) {
+      return defaultTimeout;
+    }
+
+    Short timeout = null;
+
+    for (String key : timeoutKeys) {
+      String configValue = configuration.getProperty(key);
+
+      if (StringUtils.isNotBlank(configValue)) {
+        try {
+          Short configTimeout = Short.valueOf(configValue);
+
+          if (null == timeout || configTimeout > timeout) {
+            timeout = configTimeout;
+          }
+
+        } catch (Exception e) {
+          LOG.warn("Could not parse {}/{} to a timeout value", key, configValue);
+        }
+      } else {
+        LOG.warn("Configuration {} not found to compute timeout", key);
+      }
+    }
+
+    return null == timeout ? defaultTimeout : timeout;
+  }
 }
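A hedged sketch of how an orchestrator might consume getMaxTimeout when building a stage (stageWrapper, configuration, and commandParams are illustrative assumptions):

  // Illustrative only: use the largest task-level timeout for the whole stage.
  Short maxTimeout = stageWrapper.getMaxTimeout(configuration);
  commandParams.put("command_timeout", String.valueOf(maxTimeout));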
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
index 5c43c2b..5c7cb6c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Task.java
@@ -42,6 +42,12 @@
   public boolean isSequential = false;
 
   /**
+   * The config property to check for timeout.
+   */
+  @XmlAttribute(name="timeout-config")
+  public String timeoutConfig = null;
+
+  /**
    * @return the type of the task
    */
   public abstract Type getType();
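A hedged illustration of the upgrade-pack XML this attribute is assumed to bind from (names other than timeout-config are examples):

  // Assumed upgrade pack fragment (illustrative):
  //   <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
  // JAXB would populate timeoutConfig with "upgrade.parameter.nn-restart.timeout".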
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
index 11e27cf..25b8a93 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapper.java
@@ -19,10 +19,13 @@
 
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang.StringUtils;
+
 import com.google.common.base.Objects;
 
 /**
@@ -34,18 +37,21 @@
   private String component;
   private Set<String> hosts; // all the hosts that all the tasks must run
   private Map<String, String> params;
+  /* FIXME a TaskWrapper really should be wrapping ONLY ONE task */
   private List<Task> tasks; // all the tasks defined for the hostcomponent
+  private Set<String> timeoutKeys = new HashSet<>();
 
   /**
-   * @param s the service name for the tasks
-   * @param c the component name for the tasks
-   * @param hosts the set of hosts that the tasks are for
-   * @param tasks an array of tasks as a convenience
+   * @param s the service name for the task
+   * @param c the component name for the task
+   * @param hosts the set of hosts that the task is for
+   * @param task a single task
    */
-  public TaskWrapper(String s, String c, Set<String> hosts, Task... tasks) {
-    this(s, c, hosts, null, Arrays.asList(tasks));
+  public TaskWrapper(String s, String c, Set<String> hosts, Task task) {
+    this(s, c, hosts, null, task);
   }
 
+
   /**
    * @param s the service name for the tasks
    * @param c the component name for the tasks
@@ -71,6 +77,13 @@
     this.hosts = hosts;
     this.params = (params == null) ? new HashMap<String, String>() : params;
     this.tasks = tasks;
+
+    // !!! FIXME there should only be one task
+    for (Task task : tasks) {
+      if (StringUtils.isNotBlank(task.timeoutConfig)) {
+        timeoutKeys.add(task.timeoutConfig);
+      }
+    }
   }
 
   /**
@@ -133,4 +146,12 @@
     return false;
   }
 
+
+  /**
+   * @return the timeout keys for all the tasks in this wrapper.
+   */
+  public Set<String> getTimeoutKeys() {
+    return timeoutKeys;
+  }
+
 }
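The keys gathered in the constructor are what StageWrapper.getMaxTimeout() later resolves against configuration; a minimal hedged sketch (the property key and host name are hypothetical):

  // Illustrative only: a task naming a timeout property surfaces it via the wrapper.
  ExecuteTask task = new ExecuteTask();
  task.timeoutConfig = "upgrade.parameter.zk.timeout"; // hypothetical key
  TaskWrapper wrapper = new TaskWrapper("ZOOKEEPER", "ZOOKEEPER_SERVER",
      Collections.singleton("c6401.ambari.apache.org"), task);
  // wrapper.getTimeoutKeys() now contains "upgrade.parameter.zk.timeout"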
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
index a75fe00..2212b5a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskWrapperBuilder.java
@@ -40,13 +40,16 @@
   private static Logger LOG = LoggerFactory.getLogger(TaskWrapperBuilder.class);
 
   /**
-   * Creates a collection of tasks based on the set of hosts they are allowed to run on
+   * Creates a collection of task wrappers based on the set of hosts they are allowed to run on
    * by analyzing the "hosts" attribute of any ExecuteTask objects.
+   *
    * @param service the service name for the tasks
    * @param component the component name for the tasks
    * @param hostsType the collection of sets along with their status
    * @param tasks collection of tasks
    * @param params additional parameters
+   *
+   * @return the task wrappers, one for each task passed in {@code tasks}
    */
   public static List<TaskWrapper> getTaskList(String service, String component, HostsType hostsType, List<Task> tasks, Map<String, String> params) {
     // Ok if Ambari Server is not part of the cluster hosts since this is only used in the calculation of how many batches
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeFunction.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeFunction.java
index d58316d..c472ea1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeFunction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeFunction.java
@@ -22,5 +22,5 @@
   /**
    * @return Return the function that the group must provide.
    */
-  public Task.Type getFunction();
+  Task.Type getFunction();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index f2b616c..6e65845 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -427,6 +427,9 @@
       for (String actualConfigType : updatedConfigTypes) {
         // get the actual cluster config for comparison
         DesiredConfig actualConfig = cluster.getDesiredConfigs().get(actualConfigType);
+        if (actualConfig == null && actualConfigType.equals("core-site")) {
+          continue;
+        }
         if (!actualConfig.getTag().equals(TopologyManager.TOPOLOGY_RESOLVED_TAG)) {
           // if any expected config is not resolved, deployment must wait
           LOG.info("Config type " + actualConfigType + " not resolved yet, Blueprint deployment will wait until configuration update is completed");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
index 346268f..139a1ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java
@@ -34,13 +34,13 @@
    *
    * @return blueprint name
    */
-  public String getName();
+  String getName();
 
   /**
    * Get the hot groups contained in the blueprint.
    * @return map of host group name to host group
    */
-  public Map<String, HostGroup> getHostGroups();
+  Map<String, HostGroup> getHostGroups();
 
   /**
    * Get a hostgroup specified by name.
@@ -49,7 +49,7 @@
    *
    * @return the host group with the given name or null
    */
-  public HostGroup getHostGroup(String name);
+  HostGroup getHostGroup(String name);
 
   /**
    * Get the Blueprint cluster scoped configuration.
@@ -59,7 +59,7 @@
    *
    * @return blueprint cluster scoped configuration
    */
-  public Configuration getConfiguration();
+  Configuration getConfiguration();
 
   /**
    * Get the Blueprint cluster scoped setting.
@@ -68,14 +68,14 @@
    *
    * @return blueprint cluster scoped setting
    */
-  public Setting getSetting();
+  Setting getSetting();
 
   /**
    * Get all of the services represented in the blueprint.
    *
    * @return collection of all represented service names
    */
-  public Collection<String> getServices();
+  Collection<String> getServices();
 
   /**
    * Get the components that are included in the blueprint for the specified service.
@@ -84,7 +84,7 @@
    *
    * @return collection of component names for the service.  Will not return null.
    */
-  public Collection<String> getComponents(String service);
+  Collection<String> getComponents(String service);
 
   /**
    * Get whether a component is enabled for auto start.
@@ -94,7 +94,7 @@
    *
    * @return null if value is not specified; true or false if specified.
    */
-  public String getRecoveryEnabled(String serviceName, String componentName);
+  String getRecoveryEnabled(String serviceName, String componentName);
 
   /**
    * Get whether a service is enabled for credential store use.
@@ -103,20 +103,20 @@
    *
    * @return null if value is not specified; true or false if specified.
    */
-  public String getCredentialStoreEnabled(String serviceName);
+  String getCredentialStoreEnabled(String serviceName);
 
   /**
    * Check if auto skip failure is enabled.
    * @return true if enabled, otherwise false.
    */
-  public boolean shouldSkipFailure();
+  boolean shouldSkipFailure();
 
   /**
    * Get the stack associated with the blueprint.
    *
    * @return associated stack
    */
-  public Stack getStack();
+  Stack getStack();
 
   /**
    * Get the host groups which contain components for the specified service.
@@ -126,7 +126,7 @@
    * @return collection of host groups containing components for the specified service;
    *         will not return null
    */
-  public Collection<HostGroup> getHostGroupsForService(String service);
+  Collection<HostGroup> getHostGroupsForService(String service);
 
   /**
    * Get the host groups which contain the give component.
@@ -135,28 +135,36 @@
    *
    * @return collection of host groups containing the specified component; will not return null
    */
-  public Collection<HostGroup> getHostGroupsForComponent(String component);
+  Collection<HostGroup> getHostGroupsForComponent(String component);
 
-  public SecurityConfiguration getSecurity();
+  SecurityConfiguration getSecurity();
 
   /**
    * Validate the blueprint topology.
    *
    * @throws InvalidTopologyException if the topology is invalid
    */
-  public void validateTopology() throws InvalidTopologyException;
+  void validateTopology() throws InvalidTopologyException;
 
   /**
    * Validate that the blueprint contains all of the required properties.
    *
    * @throws InvalidTopologyException if the blueprint doesn't contain all required properties
    */
-  public void validateRequiredProperties() throws InvalidTopologyException;
+  void validateRequiredProperties() throws InvalidTopologyException;
+
+  /**
+   * A config type is valid if it is cluster-env or global, or if the blueprint
+   * contains a service related to it.
+   * @param configType the config type to check
+   * @return true if the config type is valid for this blueprint
+   */
+  boolean isValidConfigType(String configType);
 
   /**
    * Obtain the blueprint as an entity.
    *
    * @return entity representation of the blueprint
    */
-  public BlueprintEntity toEntity();
+  BlueprintEntity toEntity();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
index 40fa2d3..857061b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java
@@ -228,7 +228,7 @@
    * simulate various Stack or error conditions.
    */
   interface StackFactory {
-      public Stack createStack(String stackName, String stackVersion, AmbariManagementController managementController) throws AmbariException;
+      Stack createStack(String stackName, String stackVersion, AmbariManagementController managementController) throws AmbariException;
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
index c90e35c..826e4e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java
@@ -39,6 +39,7 @@
 import org.apache.ambari.server.orm.entities.HostGroupEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.stack.NoSuchStackException;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.gson.Gson;
@@ -600,4 +601,18 @@
       blueprintEntity.setSettings(settingEntityMap.values());
     }
   }
+
+  /**
+   * A config type is valid if it is cluster-env or global, or if the blueprint contains a service related to it.
+   */
+  public boolean isValidConfigType(String configType) {
+    if (ConfigHelper.CLUSTER_ENV.equals(configType) || "global".equals(configType)) {
+      return true;
+    }
+    String service = getStack().getServiceForConfigType(configType);
+    if (getServices().contains(service)) {
+      return true;
+    }
+    return false;
+  }
 }
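In other words, cluster-env and global are always accepted, and any other type must map to a declared service; hedged expectations for a blueprint containing only HDFS (service and type names are examples):

  // Illustrative expectations only:
  //   isValidConfigType("cluster-env") -> true   (always valid)
  //   isValidConfigType("hdfs-site")   -> true   (HDFS is in the blueprint)
  //   isValidConfigType("hive-site")   -> false  (HIVE is not in the blueprint)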
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java
index 206d161..3e6fa94 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java
@@ -29,7 +29,7 @@
    *
    * @throws InvalidTopologyException if the topology is invalid
    */
-  public void validateTopology() throws InvalidTopologyException;
+  void validateTopology() throws InvalidTopologyException;
 
   /**
    * Validate that required properties are provided.
@@ -37,5 +37,5 @@
    *
    * @throws InvalidTopologyException if required properties are not set in blueprint
    */
-  public void validateRequiredProperties() throws InvalidTopologyException;
+  void validateRequiredProperties() throws InvalidTopologyException;
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
index 9688c60..2e5995c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
@@ -20,7 +20,6 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -49,6 +48,7 @@
     this.blueprint = blueprint;
     this.stack = blueprint.getStack();
   }
+
   @Override
   public void validateTopology() throws InvalidTopologyException {
     LOGGER.info("Validating topology for blueprint: [{}]", blueprint.getName());
@@ -57,7 +57,7 @@
 
     for (HostGroup group : hostGroups) {
       Map<String, Collection<DependencyInfo>> missingGroupDependencies = validateHostGroup(group);
-      if (! missingGroupDependencies.isEmpty()) {
+      if (!missingGroupDependencies.isEmpty()) {
         missingDependencies.put(group.getName(), missingGroupDependencies);
       }
     }
@@ -73,27 +73,24 @@
           cardinalityFailures.addAll(verifyComponentInAllHostGroups(component, autoDeploy));
         } else {
           cardinalityFailures.addAll(verifyComponentCardinalityCount(
-              component, cardinality, autoDeploy));
+            component, cardinality, autoDeploy));
         }
       }
     }
 
-    if (! missingDependencies.isEmpty() || ! cardinalityFailures.isEmpty()) {
+    if (!missingDependencies.isEmpty() || !cardinalityFailures.isEmpty()) {
       generateInvalidTopologyException(missingDependencies, cardinalityFailures);
     }
   }
 
   @Override
   public void validateRequiredProperties() throws InvalidTopologyException {
-    //todo: combine with RequiredPasswordValidator
-    Map<String, Map<String, Collection<String>>> missingProperties =
-      new HashMap<>();
 
     // we don't want to include default stack properties so we can't just use hostGroup full properties
     Map<String, Map<String, String>> clusterConfigurations = blueprint.getConfiguration().getProperties();
 
     // we need to have real passwords, not references
-    if(clusterConfigurations != null) {
+    if (clusterConfigurations != null) {
       StringBuilder errorMessage = new StringBuilder();
       boolean containsSecretReferences = false;
       for (Map.Entry<String, Map<String, String>> configEntry : clusterConfigurations.entrySet()) {
@@ -104,16 +101,16 @@
             String propertyValue = propertyEntry.getValue();
             if (propertyValue != null) {
               if (SecretReference.isSecret(propertyValue)) {
-                errorMessage.append("  Config:").append(configType).append(" Property:").append(propertyName).append("\n");
+                errorMessage.append("  Config:" + configType + " Property:" + propertyName + "\n");
                 containsSecretReferences = true;
               }
             }
           }
         }
       }
-      if(containsSecretReferences) {
+      if (containsSecretReferences) {
         throw new InvalidTopologyException("Secret references are not allowed in blueprints, " +
-            "replace following properties with real passwords:\n"+errorMessage.toString());
+          "replace following properties with real passwords:\n" + errorMessage.toString());
       }
     }
 
@@ -129,9 +126,9 @@
         if (component.equals("MYSQL_SERVER")) {
           Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
           if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") != null
-              && hiveEnvConfig.get("hive_database").startsWith("Existing")) {
+            && hiveEnvConfig.get("hive_database").startsWith("Existing")) {
             throw new InvalidTopologyException("Incorrect configuration: MYSQL_SERVER component is available but hive" +
-                " using existing db!");
+              " using existing db!");
           }
         }
         if (ClusterTopologyImpl.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) {
@@ -162,71 +159,26 @@
 
         if (component.equals("HIVE_METASTORE")) {
           Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
-          if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") !=null
-              && hiveEnvConfig.get("hive_database").equals("Existing SQL Anywhere Database")
-              && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
-              && stack.getName().equalsIgnoreCase("HDP")) {
+          if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") != null
+            && hiveEnvConfig.get("hive_database").equals("Existing SQL Anywhere Database")
+            && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
+            && stack.getName().equalsIgnoreCase("HDP")) {
             throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
-                "and repo version 2.3.2+!");
+              "and repo version 2.3.2+!");
           }
         }
 
         if (component.equals("OOZIE_SERVER")) {
           Map<String, String> oozieEnvConfig = clusterConfigurations.get("oozie-env");
-          if (oozieEnvConfig != null && !oozieEnvConfig.isEmpty() && oozieEnvConfig.get("oozie_database") !=null
-              && oozieEnvConfig.get("oozie_database").equals("Existing SQL Anywhere Database")
-              && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
-              && stack.getName().equalsIgnoreCase("HDP")) {
+          if (oozieEnvConfig != null && !oozieEnvConfig.isEmpty() && oozieEnvConfig.get("oozie_database") != null
+            && oozieEnvConfig.get("oozie_database").equals("Existing SQL Anywhere Database")
+            && VersionUtils.compareVersions(stack.getVersion(), "2.3.0.0") < 0
+            && stack.getName().equalsIgnoreCase("HDP")) {
             throw new InvalidTopologyException("Incorrect configuration: SQL Anywhere db is available only for stack HDP-2.3+ " +
-                "and repo version 2.3.2+!");
-          }
-        }
-
-        //for now, AMBARI is not recognized as a service in Stacks
-        if (! component.equals("AMBARI_SERVER")) {
-          String serviceName = stack.getServiceForComponent(component);
-          if (processedServices.add(serviceName)) {
-            Collection<Stack.ConfigProperty> requiredServiceConfigs =
-                stack.getRequiredConfigurationProperties(serviceName);
-
-            for (Stack.ConfigProperty requiredConfig : requiredServiceConfigs) {
-              String configCategory = requiredConfig.getType();
-              String propertyName = requiredConfig.getName();
-              if (! stack.isPasswordProperty(serviceName, configCategory, propertyName)) {
-                Collection<String> typeRequirements = allRequiredProperties.get(configCategory);
-                if (typeRequirements == null) {
-                  typeRequirements = new HashSet<>();
-                  allRequiredProperties.put(configCategory, typeRequirements);
-                }
-                typeRequirements.add(propertyName);
-              }
-            }
+              "and repo version 2.3.2+!");
           }
         }
       }
-      for (Map.Entry<String, Collection<String>> requiredTypeProperties : allRequiredProperties.entrySet()) {
-        String requiredCategory = requiredTypeProperties.getKey();
-        Collection<String> requiredProperties = requiredTypeProperties.getValue();
-        Collection<String> operationalTypeProps = operationalConfiguration.containsKey(requiredCategory) ?
-            operationalConfiguration.get(requiredCategory).keySet() :
-            Collections.<String>emptyList();
-
-        requiredProperties.removeAll(operationalTypeProps);
-        if (! requiredProperties.isEmpty()) {
-          String hostGroupName = hostGroup.getName();
-          Map<String, Collection<String>> hostGroupMissingProps = missingProperties.get(hostGroupName);
-          if (hostGroupMissingProps == null) {
-            hostGroupMissingProps = new HashMap<>();
-            missingProperties.put(hostGroupName, hostGroupMissingProps);
-          }
-          hostGroupMissingProps.put(requiredCategory, requiredProperties);
-        }
-      }
-    }
-
-    if (! missingProperties.isEmpty()) {
-      throw new InvalidTopologyException("Missing required properties.  Specify a value for these " +
-          "properties in the blueprint configuration. " + missingProperties);
     }
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index 5913f4b..ed25aea 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -66,6 +66,11 @@
   private Stack stack;
   private boolean configureSecurity = false;
 
+  public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology topology, boolean setInitial, StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor, boolean configureSecurity) {
+    this(ambariContext, topology, setInitial, stackAdvisorBlueprintProcessor);
+    this.configureSecurity = configureSecurity;
+  }
+
   public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology clusterTopology, boolean setInitial,
                                      StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor) {
     this.ambariContext = ambariContext;
@@ -82,6 +87,21 @@
   }
 
   /**
+   * Remove config-types from the given configuration if no services are related to them (except cluster-env and global).
+   */
+  private void removeOrphanConfigTypes(Configuration configuration) {
+    Blueprint blueprint = clusterTopology.getBlueprint();
+
+    Collection<String> configTypes = configuration.getAllConfigTypes();
+    for (String configType : configTypes) {
+      if (!blueprint.isValidConfigType(configType)) {
+        configuration.removeConfigType(configType);
+        LOG.info("Removing config type '{}' as related service is not present in either Blueprint or cluster creation template.", configType);
+      }
+    }
+  }
+
+  /**
    * Remove config-types if no services are related to them (except cluster-env and global).
    */
   private void removeOrphanConfigTypes() {
@@ -100,29 +120,6 @@
     }
   }
 
-  /**
-   * Remove config-types from the given configuration if there is no any services related to them (except cluster-env and global).
-   */
-  private void removeOrphanConfigTypes(Configuration configuration) {
-    Blueprint blueprint = clusterTopology.getBlueprint();
-
-    Collection<String> configTypes = configuration.getAllConfigTypes();
-    for (String configType : configTypes) {
-      if (!"cluster-env".equals(configType) && !"global".equals(configType)) {
-        String service = blueprint.getStack().getServiceForConfigType(configType);
-        if (!blueprint.getServices().contains(service)) {
-          configuration.removeConfigType(configType);
-          LOG.info("Removing config type '{}' as service '{}' is not present in either Blueprint or cluster creation template.", configType, service);
-        }
-      }
-    }
-  }
-
-  public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology topology, boolean setInitial, StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor, boolean configureSecurity) {
-    this(ambariContext, topology, setInitial, stackAdvisorBlueprintProcessor);
-    this.configureSecurity = configureSecurity;
-  }
-
   // get names of required host groups
   public Collection<String> getRequiredHostGroups() {
     Collection<String> requiredHostGroups = new HashSet<>();
@@ -137,17 +134,19 @@
     // this will update the topo cluster config and all host group configs in the cluster topology
     Set<String> updatedConfigTypes = new HashSet<>();
 
-    Configuration clusterConfiguration = clusterTopology.getConfiguration();
-    Map<String, Map<String, String>> existingConfigurations = clusterConfiguration.getFullProperties();
+    Map<String, Map<String, String>> userProvidedConfigurations = clusterTopology.getConfiguration().getFullProperties(1);
 
     try {
       if (configureSecurity) {
+        Configuration clusterConfiguration = clusterTopology.getConfiguration();
+        Map<String, Map<String, String>> existingConfigurations = clusterConfiguration.getFullProperties();
         updatedConfigTypes.addAll(configureKerberos(clusterConfiguration, existingConfigurations));
       }
 
       // obtain recommended configurations before config updates
       if (!ConfigRecommendationStrategy.NEVER_APPLY.equals(this.clusterTopology.getConfigRecommendationStrategy())) {
-        stackAdvisorBlueprintProcessor.adviseConfiguration(this.clusterTopology, existingConfigurations);
+        // get merged properties from Blueprint & cluster template (this doesn't contain stack default values)
+        stackAdvisorBlueprintProcessor.adviseConfiguration(this.clusterTopology, userProvidedConfigurations);
       }
 
       updatedConfigTypes.addAll(configurationProcessor.doUpdateForClusterCreate());
@@ -159,20 +158,6 @@
     setConfigurationsOnCluster(clusterTopology, TopologyManager.TOPOLOGY_RESOLVED_TAG, updatedConfigTypes);
   }
 
-  /**
-   * A config type is orphaned if there are services related to except cluster-env and global.
-   */
-  private boolean isOrphanedConfigType(String configType, Blueprint blueprint) {
-    boolean isOrphanedConfigType = false;
-    if (!"cluster-env".equals(configType) && !"global".equals(configType)) {
-      String service = blueprint.getStack().getServiceForConfigType(configType);
-      if (!blueprint.getServices().contains(service)) {
-        isOrphanedConfigType = true;
-      }
-    }
-    return isOrphanedConfigType;
-  }
-
   private Set<String> configureKerberos(Configuration clusterConfiguration, Map<String, Map<String, String>> existingConfigurations) throws AmbariException {
     Set<String> updatedConfigTypes = new HashSet<>();
 
@@ -213,7 +198,7 @@
 
       for (String configType : updatedConfigs.keySet()) {
         // apply only if config type has related services in Blueprint
-        if (!isOrphanedConfigType(configType, blueprint)) {
+        if (blueprint.isValidConfigType(configType)) {
           Map<String, String> propertyMap = updatedConfigs.get(configType);
           Map<String, String> clusterConfigProperties = existingConfigurations.get(configType);
           Map<String, String> stackDefaultConfigProperties = stackDefaultProps.get(configType);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
index e37c68d..639c406 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
@@ -178,4 +178,6 @@
    */
   void removeHost(String hostname);
 
+  /**
+   * @return the default password supplied with the provision request, or null if none was given
+   */
+  String getDefaultPassword();
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index 37fb7d4..2ea904e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -26,13 +26,13 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.internal.ProvisionAction;
+import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,6 +54,7 @@
   private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<>();
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
   private final AmbariContext ambariContext;
+  private final String defaultPassword;
 
   private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class);
 
@@ -65,26 +66,16 @@
     // provision cluster currently requires that all hostgroups have same BP so it is ok to use root level BP here
     this.blueprint = topologyRequest.getBlueprint();
     this.configuration = topologyRequest.getConfiguration();
+    if (topologyRequest instanceof ProvisionClusterRequest) {
+      this.defaultPassword = ((ProvisionClusterRequest) topologyRequest).getDefaultPassword();
+    } else {
+      this.defaultPassword = null;
+    }
 
     registerHostGroupInfo(topologyRequest.getHostGroupInfo());
 
-    validateTopology(topologyRequest.getTopologyValidators());
-    this.ambariContext = ambariContext;
-  }
-
-  //todo: only used in tests, remove.  Validators not invoked when this constructor is used.
-  public ClusterTopologyImpl(AmbariContext ambariContext,
-                             Long clusterId,
-                             Blueprint blueprint,
-                             Configuration configuration,
-                             Map<String, HostGroupInfo> hostGroupInfo)
-                                throws InvalidTopologyException {
-
-    this.clusterId = clusterId;
-    this.blueprint = blueprint;
-    this.configuration = configuration;
-
-    registerHostGroupInfo(hostGroupInfo);
+    // todo extract validation to specialized service
+    validateTopology();
     this.ambariContext = ambariContext;
   }
 
@@ -213,12 +204,9 @@
       && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true");
   }
 
-  private void validateTopology(List<TopologyValidator> validators)
+  private void validateTopology()
       throws InvalidTopologyException {
 
-    for (TopologyValidator validator : validators) {
-      validator.validate(this);
-    }
     if(isNameNodeHAEnabled()){
         Collection<String> nnHosts = getHostAssignmentsForComponent("NAMENODE");
         if (nnHosts.size() != 2) {
@@ -320,6 +308,11 @@
     }
   }
 
+  @Override
+  public String getDefaultPassword() {
+    return defaultPassword;
+  }
+
   private void registerHostGroupInfo(Map<String, HostGroupInfo> requestedHostGroupInfoMap) throws InvalidTopologyException {
     LOG.debug("Registering requested host group information for {} hostgroups", requestedHostGroupInfoMap.size());
     checkForDuplicateHosts(requestedHostGroupInfoMap);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java
index 4353963..2d464aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java
@@ -38,14 +38,14 @@
    *
    * @return the host group name
    */
-  public String getName();
+  String getName();
 
   /**
    * Get the name of the associated blueprint
    *
    * @return associated blueprint name
    */
-  public String getBlueprintName();
+  String getBlueprintName();
 
   /**
    * Get the fully qualified host group name in the form of
@@ -53,21 +53,21 @@
    *
    * @return fully qualified host group name
    */
-  public String getFullyQualifiedName();
+  String getFullyQualifiedName();
 
   /**
    * Get all of the host group components.
    *
    * @return collection of component instances
    */
-  public Collection<Component> getComponents();
+  Collection<Component> getComponents();
 
   /**
    * Get all of the host group component names
    *
    * @return collection of component names as String
    */
-  public Collection<String> getComponentNames();
+  Collection<String> getComponentNames();
 
   /**
    * Get all host group component names for instances
@@ -79,7 +79,7 @@
    * @return collection of component names as String that are associated with
    *           the specified provision action
    */
-  public Collection<String> getComponentNames(ProvisionAction provisionAction);
+  Collection<String> getComponentNames(ProvisionAction provisionAction);
 
   /**
    * Get the host group components which belong to the specified service.
@@ -88,7 +88,7 @@
    *
    * @return collection of component names for the specified service; will not return null
    */
-  public Collection<String> getComponents(String service);
+  Collection<String> getComponents(String service);
 
   /**
    * Add a component to the host group.
@@ -97,7 +97,7 @@
    *
    * @return true if the component didn't already exist
    */
-  public boolean addComponent(String component);
+  boolean addComponent(String component);
 
   /**
    * Add a component to the host group, with the specified name
@@ -107,21 +107,21 @@
    * @param provisionAction provision action for this component
    * @return
    */
-  public boolean addComponent(String component, ProvisionAction provisionAction);
+  boolean addComponent(String component, ProvisionAction provisionAction);
 
   /**
    * Determine if the host group contains a master component.
    *
    * @return true if the host group contains a master component; false otherwise
    */
-  public boolean containsMasterComponent();
+  boolean containsMasterComponent();
 
   /**
    * Get all of the services associated with the host group components.
    *
    * @return collection of service names
    */
-  public Collection<String> getServices();
+  Collection<String> getServices();
 
   /**
    * Get the configuration associated with the host group.
@@ -130,14 +130,14 @@
    *
    * @return host group configuration
    */
-  public Configuration getConfiguration();
+  Configuration getConfiguration();
 
   /**
    * Get the stack associated with the host group.
    *
    * @return associated stack
    */
-  public Stack getStack();
+  Stack getStack();
 
   /**
    * Get the cardinality value that was specified for the host group.
@@ -146,6 +146,6 @@
    *
    * @return the cardinality specified for the hostgroup
    */
-  public String getCardinality();
+  String getCardinality();
 }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
index a271c0b..b5ee94b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
@@ -164,8 +164,8 @@
     return requestsWithReservedHosts.keySet();
   }
 
-  public boolean hasCompleted() {
-    return requestsWithReservedHosts.isEmpty() && outstandingHostRequests.isEmpty();
+  public boolean hasPendingHostRequests() {
+    return !requestsWithReservedHosts.isEmpty() || !outstandingHostRequests.isEmpty();
   }
 
   public Collection<HostRequest> getCompletedHostRequests() {
@@ -176,11 +176,45 @@
     return completedHostRequests;
   }
 
+  public int getPendingHostRequestCount() {
+    return outstandingHostRequests.size() + requestsWithReservedHosts.size();
+  }
+
   //todo: this is only here for toEntity() functionality
   public Collection<HostRequest> getHostRequests() {
     return new ArrayList<>(allHostRequests);
   }
 
+  /**
+   * Removes pending host requests (outstanding requests not picked up by any host, where hostName is null) for a host group.
+   * @param hostGroupName the host group to match, or null to remove pending requests for every host group
+   * @return the pending host requests that were removed
+   */
+  public Collection<HostRequest> removePendingHostRequests(String hostGroupName) {
+    Collection<HostRequest> pendingHostRequests = new ArrayList<>();
+    for(HostRequest hostRequest : outstandingHostRequests) {
+      if(hostGroupName == null || hostRequest.getHostgroupName().equals(hostGroupName)) {
+        pendingHostRequests.add(hostRequest);
+      }
+    }
+    outstandingHostRequests.clear();
+
+    Collection<String> pendingReservedHostNames = new ArrayList<>();
+    for(String reservedHostName : requestsWithReservedHosts.keySet()) {
+      HostRequest hostRequest = requestsWithReservedHosts.get(reservedHostName);
+      if(hostGroupName == null || hostRequest.getHostgroupName().equals(hostGroupName)) {
+        pendingHostRequests.add(hostRequest);
+        pendingReservedHostNames.add(reservedHostName);
+      }
+    }
+    for (String hostName : pendingReservedHostNames) {
+      requestsWithReservedHosts.remove(hostName);
+    }
+
+    allHostRequests.removeAll(pendingHostRequests);
+    return pendingHostRequests;
+  }
+
   public Map<String, Collection<String>> getProjectedTopology() {
     Map<String, Collection<String>> hostComponentMap = new HashMap<>();
 
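A hedged sketch of how a caller might shrink a running request via removePendingHostRequests (the scale-down flow and names are assumptions):

  // Illustrative only: drop pending requests for one host group, or all groups when null.
  Collection<HostRequest> removed = logicalRequest.removePendingHostRequests("worker_group");
  // the caller is then expected to delete the matching persisted entities as well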
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedState.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedState.java
index 354764b..f353b8c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedState.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedState.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.topology;
 
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
@@ -78,4 +79,10 @@
    * @return
    */
   LogicalRequest getProvisionRequest(long clusterId);
+
+  /**
+   * Removes the given host requests from the persisted topology state.
+   * @param hostRequests the host requests to remove
+   */
+  void removeHostRequests(Collection<HostRequest> hostRequests);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
index 2ac9950..a8b202e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
@@ -20,7 +20,6 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -54,6 +53,7 @@
 
 import com.google.gson.Gson;
 import com.google.inject.Inject;
+import com.google.inject.persist.Transactional;
 
 /**
  * Implementation which uses Ambari Database DAO and Entity objects for persistence
@@ -120,6 +120,15 @@
   }
 
   @Override
+  @Transactional
+  public void removeHostRequests(Collection<HostRequest> hostRequests) {
+    for(HostRequest hostRequest :  hostRequests) {
+      TopologyHostRequestEntity hostRequestEntity = hostRequestDAO.findById(hostRequest.getId());
+      hostRequestDAO.remove(hostRequestEntity);
+    }
+  }
+
+  @Override
   public void registerPhysicalTask(long logicalTaskId, long physicalTaskId) {
     TopologyLogicalTaskEntity entity = topologyLogicalTaskDAO.findById(logicalTaskId);
     HostRoleCommandEntity physicalEntity = hostRoleCommandDAO.findByPK(physicalTaskId);
@@ -399,10 +408,6 @@
       return hostGroupInfoMap;
     }
 
-    @Override
-    public List<TopologyValidator> getTopologyValidators() {
-      return Collections.emptyList();
-    }
 
     @Override
     public String getDescription() {
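A hedged sketch tying the two new methods together (the orchestration shown is an assumption, not code from this patch):

  // Illustrative only: remove pending requests in memory, then purge them from the DB.
  Collection<HostRequest> pending = logicalRequest.removePendingHostRequests(hostGroupName);
  persistedState.removeHostRequests(pending); // @Transactional removal of the entities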
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 392a53e..f5cf498 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -74,6 +74,7 @@
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTask;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,7 +90,9 @@
 @Singleton
 public class TopologyManager {
 
-  /** internal token for topology related async tasks */
+  /**
+   * internal token for topology related async tasks
+   */
   public static final String INTERNAL_AUTH_TOKEN = "internal_topology_token";
 
   public static final String INITIAL_CONFIG_TAG = "INITIAL";
@@ -135,6 +138,9 @@
   @Inject
   private SettingDAO settingDAO;
 
+  @Inject
+  private TopologyValidatorService topologyValidatorService;
+
   /**
    * A boolean not cached thread-local (volatile) to prevent double-checked
    * locking on the synchronized keyword.
@@ -264,32 +270,35 @@
     // get the id prior to creating ambari resources which increments the counter
     final Long provisionId = ambariContext.getNextRequestId();
 
-    boolean configureSecurity = false;
+    SecurityType securityType = null;
+    Credential credential = null;
 
     SecurityConfiguration securityConfiguration = processSecurityConfiguration(request);
 
     if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS) {
-      configureSecurity = true;
+      securityType = SecurityType.KERBEROS;
       addKerberosClient(topology);
 
       // refresh default stack config after adding KERBEROS_CLIENT component to topology
-      topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint
-        ().getServices()));
+      topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getServices()));
 
-      // create Cluster resource with security_type = KERBEROS, this will trigger cluster Kerberization
-      // upon host install task execution
-      ambariContext.createAmbariResources(topology, clusterName, SecurityType.KERBEROS, repoVersion);
-      if (securityConfiguration.getDescriptor() != null) {
-        submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor());
-      }
-
-      Credential credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
+      credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
       if (credential == null) {
         throw new InvalidTopologyException(KDC_ADMIN_CREDENTIAL + " is missing from request.");
       }
+    }
+
+    topologyValidatorService.validateTopologyConfiguration(topology);
+
+    // create resources
+    ambariContext.createAmbariResources(topology, clusterName, securityType, repoVersion);
+
+    if (securityConfiguration != null && securityConfiguration.getDescriptor() != null) {
+      submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor());
+    }
+
+    if (credential != null) {
       submitCredential(clusterName, credential);
-    } else {
-      ambariContext.createAmbariResources(topology, clusterName, null, repoVersion);
     }
 
     long clusterId = ambariContext.getClusterId(clusterName);
@@ -312,8 +321,8 @@
 
     clusterTopologyMap.put(clusterId, topology);
 
-    addClusterConfigRequest(topology, new ClusterConfigurationRequest(
-      ambariContext, topology, true, stackAdvisorBlueprintProcessor, configureSecurity));
+    addClusterConfigRequest(topology, new ClusterConfigurationRequest(ambariContext, topology, true,
+      stackAdvisorBlueprintProcessor, securityType == SecurityType.KERBEROS));
 
 
     // Notify listeners that cluster configuration finished
@@ -425,7 +434,7 @@
 
     Map<String, String> requestInfoProps = new HashMap<>();
     requestInfoProps.put(org.apache.ambari.server.controller.spi.Request.REQUEST_INFO_BODY_PROPERTY,
-      "{\"" + ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY + "\": " + descriptor + "}");
+            "{\"" + ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY + "\": " + descriptor + "}");
 
     org.apache.ambari.server.controller.spi.Request request = new RequestImpl(Collections.<String>emptySet(),
         Collections.singleton(properties), requestInfoProps, null);
@@ -476,20 +485,55 @@
 
     final Long requestId = ambariContext.getNextRequestId();
     LogicalRequest logicalRequest = RetryHelper.executeWithRetry(new Callable<LogicalRequest>() {
-        @Override
-        public LogicalRequest call() throws Exception {
-          LogicalRequest logicalRequest = processAndPersistTopologyRequest(request, topology, requestId);
+         @Override
+         public LogicalRequest call() throws Exception {
+           LogicalRequest logicalRequest = processAndPersistTopologyRequest(request, topology, requestId);
 
-          return logicalRequest;
-        }
-      }
+           return logicalRequest;
+         }
+       }
     );
-
     processRequest(request, topology, logicalRequest);
-
     return getRequestStatus(logicalRequest.getRequestId());
   }
 
+  public void removePendingHostRequests(String clusterName, long requestId) {
+    ensureInitialized();
+    LOG.info("TopologyManager.removePendingHostRequests: Entering");
+
+    long clusterId = 0;
+    try {
+      clusterId = ambariContext.getClusterId(clusterName);
+    } catch (AmbariException e) {
+      LOG.error("Unable to retrieve clusterId", e);
+      throw new IllegalArgumentException("Unable to retrieve clusterId");
+    }
+    ClusterTopology topology = clusterTopologyMap.get(clusterId);
+    if (topology == null) {
+      throw new IllegalArgumentException("Unable to retrieve cluster topology for cluster");
+    }
+
+    LogicalRequest logicalRequest = allRequests.get(requestId);
+    if (logicalRequest == null) {
+      throw new IllegalArgumentException("No Logical Request found for requestId: " + requestId);
+    }
+
+    Collection<HostRequest> pendingHostRequests = logicalRequest.removePendingHostRequests(null);
+
+    if (!logicalRequest.hasPendingHostRequests()) {
+      outstandingRequests.remove(logicalRequest);
+    }
+
+    persistedState.removeHostRequests(pendingHostRequests);
+
+    // set current host count to number of currently connected hosts
+    for (HostGroupInfo currentHostGroupInfo : topology.getHostGroupInfo().values()) {
+      currentHostGroupInfo.setRequestedCount(currentHostGroupInfo.getHostNames().size());
+    }
+
+    LOG.info("TopologyManager.removePendingHostRequests: Exit");
+  }
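A hedged call sketch for this new administrative entry point; the cluster name and request id are illustrative values, and `injector` stands in for however the caller obtains the TopologyManager singleton:

```java
// Hypothetical invocation: drop the not-yet-started host requests of request 42
// on cluster "c1" and shrink the requested host counts to the connected hosts.
TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
topologyManager.removePendingHostRequests("c1", 42L);
```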
+
   /**
    * Creates and persists a {@see PersistedTopologyRequest} and a {@see LogicalRequest} for the provided
    * provision cluster request and topology.
@@ -928,7 +972,7 @@
 
       for (LogicalRequest logicalRequest : requestEntry.getValue()) {
         allRequests.put(logicalRequest.getRequestId(), logicalRequest);
-        if (!logicalRequest.hasCompleted()) {
+        if (logicalRequest.hasPendingHostRequests()) {
           outstandingRequests.add(logicalRequest);
           for (String reservedHost : logicalRequest.getReservedHosts()) {
             reservedHosts.put(reservedHost, logicalRequest);
@@ -959,6 +1003,7 @@
         }
       }
     }
+    LOG.info("TopologyManager.replayRequests: Exit");
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
index 516ea14..4cadefa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.topology;
 
-import java.util.List;
 import java.util.Map;
 
 /**
@@ -28,21 +27,21 @@
   /**
    * Request types.
    */
-  public enum Type { PROVISION, SCALE, EXPORT }
+  enum Type { PROVISION, SCALE, EXPORT }
 
   /**
    * Get the cluster id associated with the request. Can be <code>null</code>.
    *
    * @return associated cluster id
    */
-  public Long getClusterId();
+  Long getClusterId();
 
   /**
    * Get the request type.
    *
    * @return the type of request
    */
-  public Type getType();
+  Type getType();
 
   //todo: only a single BP may be specified so all host groups have the same bp.
   //todo: BP really needs to be associated with the HostGroupInfo, even for create which will have a single BP
@@ -53,33 +52,26 @@
    *
    * @return associated blueprint instance
    */
-  public Blueprint getBlueprint();
+  Blueprint getBlueprint();
 
   /**
    * Get the cluster scoped configuration for the request.
    *
    * @return cluster scoped configuration
    */
-  public Configuration getConfiguration();
+  Configuration getConfiguration();
 
   /**
    * Get host group info.
    *
    * @return map of host group name to group info
    */
-  public Map<String, HostGroupInfo> getHostGroupInfo();
-
-  /**
-   * Get request topology validators.
-   *
-   * @return list of topology validators
-   */
-  public List<TopologyValidator> getTopologyValidators();
+  Map<String, HostGroupInfo> getHostGroupInfo();
 
   /**
    * Get request description.
    *
    * @return string description of the request
    */
-  public String getDescription();
+  String getDescription();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java
index e78c5e0..8248536 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java
@@ -28,6 +28,6 @@
  */
 public interface TopologyRequestFactory {
 
-  public ProvisionClusterRequest createProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException;
+  ProvisionClusterRequest createProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException;
   // todo: use to create other request types
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java
index 146b424..58e858c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java
@@ -22,5 +22,5 @@
  * Performs topology validation.
  */
 public interface TopologyValidator {
-  public void validate(ClusterTopology topology) throws InvalidTopologyException;
+  void validate(ClusterTopology topology) throws InvalidTopologyException;
 }
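Dropping the redundant `public` modifier changes nothing for implementors; a minimal sketch of a custom validator against this interface (the class and its check are illustrative, not part of the patch):

```java
// Hypothetical validator: reject topologies that declare no host groups at all.
public class NonEmptyHostGroupValidator implements TopologyValidator {
  @Override
  public void validate(ClusterTopology topology) throws InvalidTopologyException {
    if (topology.getHostGroupInfo().isEmpty()) {
      throw new InvalidTopologyException("Topology must declare at least one host group.");
    }
  }
}
```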
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/TopologyTask.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/TopologyTask.java
index 0753c3d..458191b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/TopologyTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/TopologyTask.java
@@ -26,7 +26,7 @@
   /**
    * Task type.
    */
-  public enum Type {
+  enum Type {
     RESOURCE_CREATION,
     CONFIGURE,
     INSTALL,
@@ -38,5 +38,5 @@
    *
    * @return the type of task
    */
-  public Type getType();
+  Type getType();
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java
new file mode 100644
index 0000000..8bcbcff
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.List;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Topology validator wrapper implementation. Executes a set of validations by calling a preconfigured set of validator implementations.
+ */
+public class ChainedTopologyValidator implements TopologyValidator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(ChainedTopologyValidator.class);
+  private List<TopologyValidator> validators;
+
+  public ChainedTopologyValidator(List<TopologyValidator> validators) {
+    this.validators = validators;
+  }
+
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+    for (TopologyValidator validator : validators) {
+      LOGGER.info("Performing topology validation: {}", validator.getClass());
+      validator.validate(topology);
+    }
+  }
+}
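A hedged composition sketch; in this patch the chain is normally assembled by TopologyValidatorFactory (added below), so building one by hand is illustrative only. The chain is fail-fast: the first validator that throws stops the run.

```java
// Illustrative only: compose two validators from this patch into one chain.
TopologyValidator chain = new ChainedTopologyValidator(ImmutableList.<TopologyValidator>of(
    new StackConfigTypeValidator(), new HiveServiceValidator()));
chain.validate(topology); // throws InvalidTopologyException at the first failing validator
```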
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
index 1351739..80b2593 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
@@ -59,7 +59,7 @@
     }
 
     // hive database settings need the mysql-server component in the blueprint
-    if (!topology.getBlueprint().getServices().contains(MYSQL_SERVER_COMPONENT)) {
+    if (!topology.getBlueprint().getComponents(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)) {
       String errorMessage = String.format("Component [%s] must explicitly be set in the blueprint when hive database " +
         "is configured with the current settings. HIVE service validation failed.", MYSQL_SERVER_COMPONENT);
       LOGGER.error(errorMessage);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
new file mode 100644
index 0000000..759d9e9
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroup;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Validates the configuration by checking the existence of required properties for the services listed in the blueprint.
+ * Required properties are specified in the stack and are tied to config types and services.
+ *
+ * The validator ignores password properties that should never be specified in the artifacts (blueprint / cluster creation template).
+ */
+public class RequiredConfigPropertiesValidator implements TopologyValidator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(RequiredConfigPropertiesValidator.class);
+
+  /**
+   * Validates the configuration coming from the blueprint and cluster creation template and ensures that all the required properties are provided.
+   * It is expected that in a host group containing components for a given service, all required configuration for that service is available.
+   *
+   * @param topology the topology instance holding the configuration for cluster provisioning
+   * @throws InvalidTopologyException when there are missing configuration types or properties related to services in the blueprint
+   */
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+
+    // collect required properties
+    Map<String, Map<String, Collection<String>>> requiredPropertiesByService = getRequiredPropertiesByService(topology.getBlueprint());
+
+    // find missing properties in the cluster configuration
+    Map<String, Collection<String>> missingProperties = new HashMap<>();
+    Map<String, Map<String, String>> topologyConfiguration = new HashMap<>(topology.getConfiguration().getFullProperties(1));
+
+    for (HostGroup hostGroup : topology.getBlueprint().getHostGroups().values()) {
+      LOGGER.debug("Processing hostgroup configurations for hostgroup: {}", hostGroup.getName());
+
+      // copy of all configurations available in the topology; precedence: hostgroup config -> topology config -> blueprint config
+      Map<String, Map<String, String>> operationalConfigurations = new HashMap<>(topologyConfiguration);
+
+      for (Map.Entry<String, Map<String, String>> hostgroupConfigEntry : hostGroup.getConfiguration().getProperties().entrySet()) {
+        if (operationalConfigurations.containsKey(hostgroupConfigEntry.getKey())) {
+          operationalConfigurations.get(hostgroupConfigEntry.getKey()).putAll(hostgroupConfigEntry.getValue());
+        } else {
+          operationalConfigurations.put(hostgroupConfigEntry.getKey(), hostgroupConfigEntry.getValue());
+        }
+      }
+
+      for (String hostGroupService : hostGroup.getServices()) {
+
+        if (!requiredPropertiesByService.containsKey(hostGroupService)) {
+          // there are no required properties for the service
+          LOGGER.debug("There are no required properties found for hostgroup/service: [{}/{}]", hostGroup.getName(), hostGroupService);
+          continue;
+        }
+
+        Map<String, Collection<String>> requiredPropertiesByType = requiredPropertiesByService.get(hostGroupService);
+
+        for (String configType : requiredPropertiesByType.keySet()) {
+
+          // We need a copy not to modify the original
+          Collection<String> requiredPropertiesForType = new HashSet<>(requiredPropertiesByType.get(configType));
+
+          if (!operationalConfigurations.containsKey(configType)) {
+            // all required configuration is missing for the config type
+            missingProperties = addToMissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType);
+            continue;
+          }
+
+          Collection<String> operationalConfigsForType = operationalConfigurations.get(configType).keySet();
+          requiredPropertiesForType.removeAll(operationalConfigsForType);
+          if (!requiredPropertiesForType.isEmpty()) {
+            LOGGER.info("Found missing properties in hostgroup: {}, config type: {}, mising properties: {}", hostGroup.getName(),
+              configType, requiredPropertiesForType);
+            missingProperties = addTomissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType);
+          }
+        }
+      }
+
+    }
+
+    if (!missingProperties.isEmpty()) {
+      throw new InvalidTopologyException("Missing required properties.  Specify a value for these " +
+        "properties in the blueprint or cluster creation template configuration. " + missingProperties);
+    }
+
+  }
+
+
+  /**
+   * Collects required properties for services in the blueprint. Configuration properties are returned by configuration type.
+   * service -> configType -> properties
+   *
+   * @param blueprint the blueprint from the cluster topology
+   * @return a map of services mapped to configuration types mapped to collections of
+   * required property names
+   */
+  private Map<String, Map<String, Collection<String>>> getRequiredPropertiesByService(Blueprint blueprint) {
+
+    Map<String, Map<String, Collection<String>>> requiredPropertiesForServiceByType = new HashMap<>();
+
+    for (String bpService : blueprint.getServices()) {
+      LOGGER.debug("Collecting required properties for the service: {}", bpService);
+
+      Collection<Stack.ConfigProperty> requiredConfigsForService = blueprint.getStack().getRequiredConfigurationProperties(bpService);
+      Map<String, Collection<String>> requiredPropertiesByConfigType = new HashMap<>();
+
+      for (Stack.ConfigProperty configProperty : requiredConfigsForService) {
+
+        if (configProperty.getPropertyTypes() != null && configProperty.getPropertyTypes().contains(PropertyInfo.PropertyType.PASSWORD)) {
+          LOGGER.debug("Skipping required property validation for password type: {}", configProperty.getName());
+          // skip password types
+          continue;
+        }
+
+        // add or get the service-related required properties-by-type map
+        if (requiredPropertiesForServiceByType.containsKey(bpService)) {
+          requiredPropertiesByConfigType = requiredPropertiesForServiceByType.get(bpService);
+        } else {
+          LOGGER.debug("Adding required properties entry for service: {}", bpService);
+          requiredPropertiesForServiceByType.put(bpService, requiredPropertiesByConfigType);
+        }
+
+        // add collection of required properties
+        Collection<String> requiredPropsForType = new HashSet<>();
+        if (requiredPropertiesByConfigType.containsKey(configProperty.getType())) {
+          requiredPropsForType = requiredPropertiesByConfigType.get(configProperty.getType());
+        } else {
+          LOGGER.debug("Adding required properties entry for configuration type: {}", configProperty.getType());
+          requiredPropertiesByConfigType.put(configProperty.getType(), requiredPropsForType);
+        }
+
+        requiredPropsForType.add(configProperty.getName());
+        LOGGER.debug("Added required property for service; {}, configuration type: {}, property: {}", bpService,
+          configProperty.getType(), configProperty.getName());
+      }
+    }
+
+    LOGGER.info("Identified required properties for blueprint services: {}", requiredPropertiesForServiceByType);
+    return requiredPropertiesForServiceByType;
+
+  }
+
+  private Map<String, Collection<String>> addToMissingProperties(Map<String, Collection<String>> missingProperties, String hostGroup, Collection<String> values) {
+    Map<String, Collection<String>> missing;
+
+    if (missingProperties == null) {
+      missing = new HashMap<>();
+    } else {
+      missing = new HashMap<>(missingProperties);
+    }
+
+    if (!missing.containsKey(hostGroup)) {
+      missing.put(hostGroup, new HashSet<String>());
+    }
+
+    missing.get(hostGroup).addAll(values);
+
+    return missing;
+  }
+
+
+}
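To make the collector's nested shape concrete, a hedged illustration of the service -> config type -> required properties map it returns; the service, type, and property names are examples, not taken from any stack definition:

```java
// e.g. HDFS requiring dfs.namenode.name.dir in hdfs-site (illustrative values):
Map<String, Collection<String>> hdfsRequiredByType = new HashMap<>();
hdfsRequiredByType.put("hdfs-site", Collections.singleton("dfs.namenode.name.dir"));
Map<String, Map<String, Collection<String>>> requiredPropertiesByService = new HashMap<>();
requiredPropertiesByService.put("HDFS", hdfsRequiredByType);
```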
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
index 591a124..5b4ecc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
@@ -33,10 +33,10 @@
  */
 public class RequiredPasswordValidator implements TopologyValidator {
 
+  // todo remove the field as all the information is available in the topology being validated
   private String defaultPassword;
 
-  public RequiredPasswordValidator(String defaultPassword) {
-    this.defaultPassword = defaultPassword;
+  public RequiredPasswordValidator() {
   }
 
   /**
@@ -46,6 +46,8 @@
    *                                  default is specified via 'default_password'
    */
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
+
+    defaultPassword = topology.getDefaultPassword();
     Map<String, Map<String, Collection<String>>> missingPasswords = validateRequiredPasswords(topology);
 
     if (! missingPasswords.isEmpty()) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
new file mode 100644
index 0000000..f028a31
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Validates whether incoming config types (from the blueprint or the cluster creation template) are valid.
+ * A configuration type is considered valid if it is defined in the stack based on which the cluster is to
+ * be created.
+ */
+public class StackConfigTypeValidator implements TopologyValidator {
+  private static final Logger LOGGER = LoggerFactory.getLogger(StackConfigTypeValidator.class);
+
+  public StackConfigTypeValidator() {
+  }
+
+  @Override
+  public void validate(ClusterTopology topology) throws InvalidTopologyException {
+
+    // get the config types from the request
+    Set<String> incomingConfigTypes = new HashSet<>(topology.getConfiguration().getAllConfigTypes());
+
+    if (incomingConfigTypes.isEmpty()) {
+      LOGGER.debug("No config types to be checked.");
+      return;
+    }
+
+    Set<String> stackConfigTypes = new HashSet<>(topology.getBlueprint().getStack().getConfiguration().getAllConfigTypes());
+
+    // remove all "valid" config types from the incoming set
+    incomingConfigTypes.removeAll(stackConfigTypes);
+
+    if (!incomingConfigTypes.isEmpty()) {
+      // there are config types in the request that are not in the stack
+      String message = String.format("The following config types are not defined in the stack: %s ", incomingConfigTypes);
+      LOGGER.error(message);
+      throw new InvalidTopologyException(message);
+    }
+  }
+}
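The check reduces to a plain set difference; a hedged walk-through with sample config type names (both sets are illustrative):

```java
// "custom-site" is not defined by the stack, so it survives removeAll and fails validation.
Set<String> incomingConfigTypes = new HashSet<>(Arrays.asList("core-site", "custom-site"));
Set<String> stackConfigTypes = new HashSet<>(Arrays.asList("core-site", "hdfs-site"));
incomingConfigTypes.removeAll(stackConfigTypes);
// incomingConfigTypes now holds only "custom-site" -> InvalidTopologyException would be thrown
```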
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
new file mode 100644
index 0000000..5a6f64e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.List;
+
+import org.apache.ambari.server.topology.TopologyValidator;
+
+import com.google.common.collect.ImmutableList;
+
+public class TopologyValidatorFactory {
+  List<TopologyValidator> validators;
+
+  public TopologyValidatorFactory() {
+    validators = ImmutableList.of(new RequiredConfigPropertiesValidator(), new RequiredPasswordValidator(), new HiveServiceValidator(),
+      new StackConfigTypeValidator());
+  }
+
+  public TopologyValidator createConfigurationValidatorChain() {
+    return new ChainedTopologyValidator(validators);
+  }
+
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java
new file mode 100644
index 0000000..425cf1e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorService.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import javax.inject.Inject;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Service implementation dealing with topology validation.
+ * It is intended to manage cluster topology validation by grouping validators into different sets as required
+ * by the calling logic.
+ *
+ * Ideally this service should be used instead of using validator implementations directly.
+ */
+public class TopologyValidatorService {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(TopologyValidatorService.class);
+
+  @Inject
+  private TopologyValidatorFactory topologyValidatorFactory;
+
+  public TopologyValidatorService() {
+  }
+
+  public void validateTopologyConfiguration(ClusterTopology clusterTopology) throws InvalidTopologyException {
+    LOGGER.info("Validating cluster topology: {}", clusterTopology);
+    topologyValidatorFactory.createConfigurationValidatorChain().validate(clusterTopology);
+  }
+
+}
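A hedged sketch of consuming the service, mirroring the @Inject field added to TopologyManager earlier in this diff; `injector` and `topology` are stand-ins for the caller's context:

```java
// Hypothetical caller: run the full configuration-validator chain before provisioning.
TopologyValidatorService validatorService = injector.getInstance(TopologyValidatorService.class);
validatorService.validateTopologyConfiguration(topology); // throws InvalidTopologyException on failure
```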
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 3e1d3b8..6c59784 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -150,9 +150,6 @@
     registerCatalog(this);
   }
 
-  protected AbstractUpgradeCatalog() {
-  }
-
   /**
    * Every subclass needs to register itself
    */
@@ -695,7 +692,7 @@
     private final String description;
 
 
-    private ConfigUpdateType(String description) {
+    ConfigUpdateType(String description) {
       this.description = description;
     }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index 590a3e8..48cf5f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -189,6 +189,8 @@
       catalogBinder.addBinding().to(UpgradeCatalog2402.class);
       catalogBinder.addBinding().to(UpgradeCatalog242.class);
       catalogBinder.addBinding().to(UpgradeCatalog250.class);
+      catalogBinder.addBinding().to(UpgradeCatalog251.class);
+      catalogBinder.addBinding().to(UpgradeCatalog252.class);
       catalogBinder.addBinding().to(UpgradeCatalog300.class);
       catalogBinder.addBinding().to(FinalUpgradeCatalog.class);
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
index 20f0d72..90854dd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
@@ -87,9 +87,6 @@
     daoUtils = injector.getInstance(DaoUtils.class);
   }
 
-  protected UpgradeCatalog212() {
-  }
-
   // ----- UpgradeCatalog ----------------------------------------------------
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 36c4b24..1740e25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -46,10 +46,12 @@
 import org.apache.ambari.server.orm.dao.AlertsDAO;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
 import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -62,6 +64,7 @@
 import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.ambari.server.view.ViewArchiveUtility;
+import org.apache.ambari.server.view.ViewInstanceOperationHandler;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -111,6 +114,12 @@
   protected static final String HOST_COMPONENT_DESIREDSTATE_INDEX = "UQ_hcdesiredstate_name";
 
   @Inject
+  ViewInstanceDAO viewInstanceDAO;
+
+  @Inject
+  ViewInstanceOperationHandler viewInstanceOperationHandler;
+
+  @Inject
   protected ViewArchiveUtility archiveUtility;
 
   /**
@@ -191,8 +200,7 @@
     updateHadoopEnvConfigs();
     updateKafkaConfigs();
     updateHIVEInteractiveConfigs();
-    updateHiveLlapConfigs();
-    updateTablesForZeppelinViewRemoval();
+    unInstallAllZeppelinViews();
     updateZeppelinConfigs();
     updateAtlasConfigs();
     updateLogSearchConfigs();
@@ -486,56 +494,6 @@
     }
   }
 
-  protected void updateHiveLlapConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> installedServices = cluster.getServices().keySet();
-
-          if (installedServices.contains("HIVE")) {
-            Config hiveSite = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_SITE);
-            if (hiveSite != null) {
-              Map<String, String> hiveSiteProperties = hiveSite.getProperties();
-              String schedulerDelay = hiveSiteProperties.get("hive.llap.task.scheduler.locality.delay");
-              if (schedulerDelay != null) {
-                // Property exists. Change to new default if set to -1.
-                if (schedulerDelay.length() != 0) {
-                  try {
-                    int schedulerDelayInt = Integer.parseInt(schedulerDelay);
-                    if (schedulerDelayInt == -1) {
-                      // Old default. Set to new default.
-                      updateConfigurationProperties(HIVE_INTERACTIVE_SITE, Collections
-                                                        .singletonMap("hive.llap.task.scheduler.locality.delay", "8000"), true,
-                                                    false);
-                    }
-                  } catch (NumberFormatException e) {
-                    // Invalid existing value. Set to new default.
-                    updateConfigurationProperties(HIVE_INTERACTIVE_SITE, Collections
-                                                      .singletonMap("hive.llap.task.scheduler.locality.delay", "8000"), true,
-                                                  false);
-                  }
-                }
-              }
-              updateConfigurationProperties(HIVE_INTERACTIVE_SITE,
-                                            Collections.singletonMap("hive.mapjoin.hybridgrace.hashtable", "true"), true,
-                                            false);
-              updateConfigurationProperties("tez-interactive-site",
-                                            Collections.singletonMap("tez.session.am.dag.submit.timeout.secs", "1209600"), true,
-                                            false);
-              // Explicitly skipping hive.llap.allow.permanent.fns during upgrades, since it's related to security,
-              // and we don't know if the value is set by the user or as a result of the previous default.
-            }
-          }
-        }
-      }
-    }
-  }
-
   protected void updateAMSConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
@@ -732,10 +690,19 @@
     }
   }
 
-  protected void updateTablesForZeppelinViewRemoval() throws SQLException {
-    dbAccessor.executeQuery("DELETE from viewinstance WHERE view_name='ZEPPELIN{1.0.0}'", true);
-    dbAccessor.executeQuery("DELETE from viewmain WHERE view_name='ZEPPELIN{1.0.0}'", true);
-    dbAccessor.executeQuery("DELETE from viewparameter WHERE view_name='ZEPPELIN{1.0.0}'", true);
+  protected void unInstallAllZeppelinViews() {
+    LOG.info("Removing all Zeppelin views.");
+    List<ViewInstanceEntity> viewInstanceList = viewInstanceDAO.findAll();
+    for (ViewInstanceEntity viewInstanceEntity : viewInstanceList) {
+      if (viewInstanceEntity.getViewName().equalsIgnoreCase("ZEPPELIN{1.0.0}")) {
+        LOG.info("Uninstalling Zeppelin view: {}", viewInstanceEntity);
+        try {
+          viewInstanceOperationHandler.uninstallViewInstance(viewInstanceEntity);
+        } catch (Exception e) {
+          LOG.error("Exception occurred while uninstalling view {}. Ignored for now.", viewInstanceEntity, e);
+        }
+      }
+    }
   }
 
   /**
@@ -1032,7 +999,6 @@
    *
    * @throws AmbariException
    */
-  private static final String HIVE_INTERACTIVE_SITE = "hive-interactive-site";
   private static final String HIVE_INTERACTIVE_ENV = "hive-interactive-env";
   private static final String HIVE_ENV = "hive-env";
   protected void updateHIVEInteractiveConfigs() throws AmbariException {
@@ -1043,26 +1009,6 @@
 
       if (clusterMap != null && !clusterMap.isEmpty()) {
         for (final Cluster cluster : clusterMap.values()) {
-          Config hiveInteractiveSite = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_SITE);
-          if (hiveInteractiveSite != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("hive.auto.convert.join.noconditionaltask.size", "1000000000");
-
-            String llapRpcPortString = hiveInteractiveSite.getProperties().get("hive.llap.daemon.rpc.port");
-            if (StringUtils.isNotBlank(llapRpcPortString)) {
-              try {
-                int llapRpcPort = Integer.parseInt(llapRpcPortString);
-                if (llapRpcPort == 15001) {
-                  newProperties.put("hive.llap.daemon.rpc.port", "0");
-                  LOG.info("Updating HSI hive.llap.daemon.rpc.port to: 0");
-                }
-              } catch (NumberFormatException e) {
-                LOG.warn("Unable to parse llap.rpc.port as integer: " + llapRpcPortString);
-              }
-            }
-            updateConfigurationProperties(HIVE_INTERACTIVE_SITE, newProperties, true, true);
-          }
-
           Config hiveInteractiveEnv = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_ENV);
           Config hiveEnv = cluster.getDesiredConfigByType(HIVE_ENV);
           if (hiveInteractiveEnv != null) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
new file mode 100644
index 0000000..9255daf
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
+/**
+ * The {@link UpgradeCatalog251} upgrades Ambari from 2.5.0 to 2.5.1.
+ */
+public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
+
+  static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
+  static final String HRC_IS_BACKGROUND_COLUMN = "is_background";
+
+  protected static final String KAFKA_BROKER_CONFIG = "kafka-broker";
+
+  private static final String STAGE_TABLE = "stage";
+  private static final String REQUEST_TABLE = "request";
+  private static final String CLUSTER_HOST_INFO_COLUMN = "cluster_host_info";
+  private static final String REQUEST_ID_COLUMN = "request_id";
+
+
+  /**
+   * Logger.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog251.class);
+
+  /**
+   * Constructor.
+   *
+   * @param injector
+   */
+  @Inject
+  public UpgradeCatalog251(Injector injector) {
+    super(injector);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getSourceVersion() {
+    return "2.5.0";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getTargetVersion() {
+    return "2.5.1";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDDLUpdates() throws AmbariException, SQLException {
+    addBackgroundColumnToHostRoleCommand();
+    moveClusterHostColumnFromStageToRequest();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executePreDMLUpdates() throws AmbariException, SQLException {
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDMLUpdates() throws AmbariException, SQLException {
+    addNewConfigurationsFromXml();
+    updateKAFKAConfigs();
+  }
+
+  /**
+   * Ensure that the updates from Ambari 2.4.0 are applied in the event the initial version is
+   * Ambari 2.5.0, since this Kafka change failed to make it into Ambari 2.5.0.
+   *
+   * If the base version was before Ambari 2.5.0, this method should wind up doing nothing.
+   * @throws AmbariException
+   */
+  protected void updateKAFKAConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> installedServices = cluster.getServices().keySet();
+
+          if (installedServices.contains("KAFKA") && cluster.getSecurityType() == SecurityType.KERBEROS) {
+            Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER_CONFIG);
+            if (kafkaBroker != null) {
+              String listenersPropertyValue = kafkaBroker.getProperties().get("listeners");
+              if (StringUtils.isNotEmpty(listenersPropertyValue)) {
+                String newListenersPropertyValue = listenersPropertyValue.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
+                if(!newListenersPropertyValue.equals(listenersPropertyValue)) {
+                  updateConfigurationProperties(KAFKA_BROKER_CONFIG, Collections.singletonMap("listeners", newListenersPropertyValue), true, false);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
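The word-boundary regex rewrites only standalone PLAINTEXT tokens, so listeners already converted to PLAINTEXTSASL are left alone; a hedged example with an illustrative listeners value:

```java
String listeners = "PLAINTEXT://host1:6667,PLAINTEXTSASL://host1:6668";
String updated = listeners.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
// updated == "PLAINTEXTSASL://host1:6667,PLAINTEXTSASL://host1:6668"
// no word boundary exists inside "PLAINTEXTSASL", so that entry is not rewritten again
```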
+
+  /**
+   * Adds the {@value #HRC_IS_BACKGROUND_COLUMN} column to the
+   * {@value #HOST_ROLE_COMMAND_TABLE} table.
+   *
+   * @throws SQLException
+   */
+  private void addBackgroundColumnToHostRoleCommand() throws SQLException {
+    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
+        new DBColumnInfo(HRC_IS_BACKGROUND_COLUMN, Short.class, null, 0, false));
+  }
+
+  /**
+   * Moves the {@value #CLUSTER_HOST_INFO_COLUMN} column from the {@value #STAGE_TABLE} table to
+   * the {@value #REQUEST_TABLE} table, keying rows in both tables by {@value #REQUEST_ID_COLUMN}
+   * during the move.
+   *
+   * @throws SQLException
+   */
+  private void moveClusterHostColumnFromStageToRequest() throws SQLException {
+    DBColumnInfo sourceColumn = new DBColumnInfo(CLUSTER_HOST_INFO_COLUMN, byte[].class, null, null, false);
+    DBColumnInfo targetColumn = new DBColumnInfo(CLUSTER_HOST_INFO_COLUMN, byte[].class, null, null, false);
+
+    dbAccessor.moveColumnToAnotherTable(STAGE_TABLE, sourceColumn, REQUEST_ID_COLUMN, REQUEST_TABLE, targetColumn,
+      REQUEST_ID_COLUMN, false);
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
new file mode 100644
index 0000000..c25dec1
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.upgrade;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
+/**
+ * The {@link org.apache.ambari.server.upgrade.UpgradeCatalog252} upgrades Ambari from 2.5.1 to 2.5.2.
+ */
+public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
+
+  static final String CLUSTERCONFIG_TABLE = "clusterconfig";
+  static final String SERVICE_DELETED_COLUMN = "service_deleted";
+
+  /**
+   * Constructor.
+   *
+   * @param injector
+   */
+  @Inject
+  public UpgradeCatalog252(Injector injector) {
+    super(injector);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getSourceVersion() {
+    return "2.5.1";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getTargetVersion() {
+    return "2.5.2";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDDLUpdates() throws AmbariException, SQLException {
+    addServiceDeletedColumnToClusterConfigTable();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executePreDMLUpdates() throws AmbariException, SQLException {
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDMLUpdates() throws AmbariException, SQLException {
+  }
+
+  /**
+   * Adds the {@value #SERVICE_DELETED_COLUMN} column to the
+   * {@value #CLUSTERCONFIG_TABLE} table.
+   *
+   * @throws java.sql.SQLException
+   */
+  private void addServiceDeletedColumnToClusterConfigTable() throws SQLException {
+    dbAccessor.addColumn(CLUSTERCONFIG_TABLE,
+        new DBColumnInfo(SERVICE_DELETED_COLUMN, Short.class, null, 0, false));
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index d9b9b57..44fbd4d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -23,8 +23,11 @@
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.persistence.EntityManager;
 
@@ -36,6 +39,7 @@
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.internal.CalculatedStatus;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.entities.RequestEntity;
@@ -43,11 +47,13 @@
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.jdbc.support.JdbcUtils;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -67,10 +73,13 @@
   protected static final String CLUSTER_CONFIG_SELECTED_COLUMN = "selected";
   protected static final String CLUSTER_CONFIG_SELECTED_TIMESTAMP_COLUMN = "selected_timestamp";
   protected static final String CLUSTER_CONFIG_MAPPING_TABLE = "clusterconfigmapping";
+  protected static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
+  protected static final String HRC_OPS_DISPLAY_NAME_COLUMN = "ops_display_name";
 
   @Inject
   DaoUtils daoUtils;
 
+
   // ----- Constructors ------------------------------------------------------
 
   /**
@@ -110,8 +119,10 @@
    */
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
+    addServiceComponentColumn();
     updateStageTable();
     updateClusterConfigurationTable();
+    addOpsDisplayNameColumnToHostRoleCommand();
   }
 
   protected void updateStageTable() throws SQLException {
@@ -139,6 +150,7 @@
     addNewConfigurationsFromXml();
     showHcatDeletedUserMessage();
     setStatusOfStagesAndRequests();
+    updateLogSearchConfigs();
   }
 
   protected void showHcatDeletedUserMessage() {
@@ -162,6 +174,16 @@
 
   }
 
+  /**
+   * Updates the {@code servicecomponentdesiredstate} table.
+   *
+   * @throws SQLException
+   */
+  protected void addServiceComponentColumn() throws SQLException {
+    dbAccessor.addColumn(UpgradeCatalog250.COMPONENT_TABLE,
+        new DBColumnInfo("repo_state", String.class, 255, RepositoryVersionState.INIT.name(), false));
+  }
+
   protected void setStatusOfStagesAndRequests() {
     executeInTransaction(new Runnable() {
       @Override
@@ -205,7 +227,7 @@
    * <ul>
    * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_COLUMN} to
    * {@link #CLUSTER_CONFIG_TABLE}.
-   * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_TIMESTAMP} to
+   * <li>Adds the {@link #CLUSTER_CONFIG_SELECTED_TIMESTAMP_COLUMN} to
    * {@link #CLUSTER_CONFIG_TABLE}.
    * </ul>
    */
@@ -274,4 +296,60 @@
     // the cluster configuration mapping table
     dbAccessor.dropTable(CLUSTER_CONFIG_MAPPING_TABLE);
   }
+
+  /**
+   * Adds the {@value #HRC_OPS_DISPLAY_NAME_COLUMN} column to the
+   * {@value #HOST_ROLE_COMMAND_TABLE} table.
+   *
+   * @throws SQLException
+   */
+  private void addOpsDisplayNameColumnToHostRoleCommand() throws SQLException {
+    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
+        new DBAccessor.DBColumnInfo(HRC_OPS_DISPLAY_NAME_COLUMN, String.class, 255, null, true));
+  }
+
+  /**
+   * Updates Log Search configs.
+   *
+   * @throws AmbariException
+   */
+  protected void updateLogSearchConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Collection<Config> configs = cluster.getAllConfigs();
+          for (Config config : configs) {
+            String configType = config.getType();
+            if (!configType.endsWith("-logsearch-conf")) {
+              continue;
+            }
+
+            Set<String> removeProperties = new HashSet<>();
+            removeProperties.add("service_name");
+            removeProperties.add("component_mappings");
+            removeProperties.add("content");
+
+            removeConfigurationPropertiesFromCluster(cluster, configType, removeProperties);
+          }
+          
+          Config logSearchProperties = cluster.getDesiredConfigByType("logsearch-properties");
+          Config logFeederProperties = cluster.getDesiredConfigByType("logfeeder-properties");
+          if (logSearchProperties != null && logFeederProperties != null) {
+            String defaultLogLevels = logSearchProperties.getProperties().get("logsearch.logfeeder.include.default.level");
+            
+            Set<String> removeProperties = Sets.newHashSet("logsearch.logfeeder.include.default.level");
+            removeConfigurationPropertiesFromCluster(cluster, "logsearch-properties", removeProperties);
+            
+            Map<String, String> newProperties = new HashMap<>();
+            newProperties.put("logfeeder.include.default.level", defaultLogLevels);
+            updateConfigurationPropertiesForCluster(cluster, "logfeeder-properties", newProperties, true, true);
+          }
+        }
+      }
+    }
+  }
 }
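The Log Search portion above amounts to moving one property between config types; a hedged before/after illustration with a sample level list (the value is illustrative):

```java
// before: logsearch-properties has logsearch.logfeeder.include.default.level=FATAL,ERROR,WARN
// after:  logfeeder-properties has logfeeder.include.default.level=FATAL,ERROR,WARN
Map<String, String> newProperties = new HashMap<>();
newProperties.put("logfeeder.include.default.level", "FATAL,ERROR,WARN"); // carried-over value
```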
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 6a88aea..f184f37 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -194,20 +194,20 @@
     return requestStageIds;
   }
 
-  public static Stage getATestStage(long requestId, long stageId, String clusterHostInfo, String commandParamsStage, String hostParamsStage) {
+  public static Stage getATestStage(long requestId, long stageId, String commandParamsStage, String hostParamsStage) {
     String hostname;
     try {
       hostname = InetAddress.getLocalHost().getHostName();
     } catch (UnknownHostException e) {
       hostname = "host-dummy";
     }
-    return getATestStage(requestId, stageId, hostname, clusterHostInfo, commandParamsStage, hostParamsStage);
+    return getATestStage(requestId, stageId, hostname, commandParamsStage, hostParamsStage);
   }
 
   //For testing only
   @Inject
-  public static Stage getATestStage(long requestId, long stageId, String hostname, String clusterHostInfo, String commandParamsStage, String hostParamsStage) {
-    Stage s = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, "context", clusterHostInfo, commandParamsStage, hostParamsStage);
+  public static Stage getATestStage(long requestId, long stageId, String hostname, String commandParamsStage, String hostParamsStage) {
+    Stage s = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, "context", commandParamsStage, hostParamsStage);
     s.setStageId(stageId);
     long now = System.currentTimeMillis();
     s.addHostRoleExecutionCommand(hostname, Role.NAMENODE, RoleCommand.INSTALL,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceHandlerList.java b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceHandlerList.java
index 8d2b52a..65c3771 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceHandlerList.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceHandlerList.java
@@ -32,12 +32,12 @@
    *
   * @throws SystemException if a handler for the view instance can not be added
    */
-  public void addViewInstance(ViewInstanceEntity viewInstanceDefinition) throws SystemException;
+  void addViewInstance(ViewInstanceEntity viewInstanceDefinition) throws SystemException;
 
   /**
    * Remove the handler for the given view instance.
    *
    * @param viewInstanceDefinition  the view instance
    */
-  public void removeViewInstance(ViewInstanceEntity viewInstanceDefinition);
+  void removeViewInstance(ViewInstanceEntity viewInstanceDefinition);
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceOperationHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceOperationHandler.java
new file mode 100644
index 0000000..56850f7
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewInstanceOperationHandler.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.view;
+
+import java.util.List;
+
+import javax.inject.Inject;
+import javax.inject.Singleton;
+
+import org.apache.ambari.server.orm.dao.PrivilegeDAO;
+import org.apache.ambari.server.orm.dao.ViewDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
+import org.apache.ambari.server.orm.entities.PrincipalEntity;
+import org.apache.ambari.server.orm.entities.PrivilegeEntity;
+import org.apache.ambari.server.orm.entities.ViewEntity;
+import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Singleton
+public class ViewInstanceOperationHandler {
+  private static final Logger LOG = LoggerFactory.getLogger(ViewInstanceOperationHandler.class);
+
+  /**
+   * View data access object.
+   */
+  @Inject
+  ViewDAO viewDAO;
+
+  /**
+   * View instance data access object.
+   */
+  @Inject
+  ViewInstanceDAO instanceDAO;
+
+  /**
+   * Privilege data access object.
+   */
+  @Inject
+  PrivilegeDAO privilegeDAO;
+
+  // remove a privilege entity.
+  private void removePrivilegeEntity(PrivilegeEntity privilegeEntity) {
+
+    PrincipalEntity principalEntity = privilegeEntity.getPrincipal();
+    if (principalEntity != null) {
+      principalEntity.removePrivilege(privilegeEntity);
+    }
+
+    privilegeDAO.remove(privilegeEntity);
+  }
+
+  public void uninstallViewInstance(ViewInstanceEntity instanceEntity) {
+    LOG.info("uninstalling ViewInstance : {} ", instanceEntity);
+    ViewEntity viewEntity = viewDAO.findByName(instanceEntity.getViewName());
+    LOG.info("viewEntity received corresponding to the view instance : {} ", viewEntity);
+
+    if (viewEntity != null) {
+      String instanceName = instanceEntity.getName();
+      String viewName = viewEntity.getCommonName();
+      String version = viewEntity.getVersion();
+
+      ViewInstanceEntity instanceDefinition = instanceDAO.findByName(instanceEntity.getViewName(), instanceEntity.getName());
+      LOG.debug("view instance entity received from database : {}", instanceDefinition);
+      if (instanceDefinition != null) {
+        if (instanceDefinition.isXmlDriven()) {
+          throw new IllegalStateException("View instances defined via xml can't be deleted through api requests");
+        }
+        List<PrivilegeEntity> instancePrivileges = privilegeDAO.findByResourceId(instanceEntity.getResource().getId());
+        LOG.info("Removing privilege entities : {}", instancePrivileges);
+        for (PrivilegeEntity privilegeEntity : instancePrivileges) {
+          removePrivilegeEntity(privilegeEntity);
+        }
+        LOG.info("Deleting view instance : view name : {}, version : {}, instanceName : {}", viewName, version, instanceName);
+        instanceDAO.remove(instanceDefinition);
+      } else {
+        throw new IllegalStateException("View instance '" + instanceEntity.getName() + "' not found.");
+      }
+    } else {
+      throw new IllegalStateException("View '" + instanceEntity.getViewName() + "' not found corresponding to view instance '" + instanceEntity.getName() + "'");
+    }
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
index 6104a8b..dd633b3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
@@ -329,6 +329,8 @@
   @Inject
   ViewURLDAO viewURLDAO;
 
+  @Inject
+  ViewInstanceOperationHandler viewInstanceOperationHandler;
+
   // ----- Constructors -----------------------------------------------------
 
   /**
@@ -556,6 +558,7 @@
   }
 
   /**
+   * TODO : This should move to {@link ViewInstanceOperationHandler}
    * Install the given view instance with its associated view.
    *
    * @param instanceEntity the view instance entity
@@ -661,35 +664,23 @@
    */
   @Transactional
   public void uninstallViewInstance(ViewInstanceEntity instanceEntity) throws IllegalStateException {
-    ViewEntity viewEntity = getDefinition(instanceEntity.getViewName());
-
-    if (viewEntity != null) {
-      String instanceName = instanceEntity.getName();
-      String viewName = viewEntity.getCommonName();
-      String version = viewEntity.getVersion();
-
-      if (getInstanceDefinition(viewName, version, instanceName) != null) {
-        if (instanceEntity.isXmlDriven()) {
-          throw new IllegalStateException("View instances defined via xml can't be deleted through api requests");
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Deleting view instance " + viewName + "/" +
-              version + "/" + instanceName);
-        }
-        List<PrivilegeEntity> instancePrivileges = privilegeDAO.findByResourceId(instanceEntity.getResource().getId());
-        for (PrivilegeEntity privilegeEntity : instancePrivileges) {
-          removePrivilegeEntity(privilegeEntity);
-        }
-        instanceDAO.remove(instanceEntity);
-        viewEntity.removeInstanceDefinition(instanceName);
-        removeInstanceDefinition(viewEntity, instanceName);
-
-        // remove the web app context
-        handlerList.removeViewInstance(instanceEntity);
-      }
+    try {
+      viewInstanceOperationHandler.uninstallViewInstance(instanceEntity);
+      updateCaches(instanceEntity);
+    } catch (IllegalStateException illegalStateException) {
+      LOG.error("Exception occurred while uninstalling view : {}", instanceEntity, illegalStateException);
+      throw illegalStateException;
     }
   }
 
+  private void updateCaches(ViewInstanceEntity instanceEntity) {
+    ViewEntity viewEntity = getDefinition(instanceEntity.getViewName());
+    viewEntity.removeInstanceDefinition(instanceEntity.getInstanceName());
+    removeInstanceDefinition(viewEntity, instanceEntity.getInstanceName());
+    // remove the web app context
+    handlerList.removeViewInstance(instanceEntity);
+  }
+
   /**
    * Remove the data entry keyed by the given key from the given instance entity.
    *
@@ -1621,18 +1612,6 @@
     }
   }
 
-  // remove a privilege entity.
-  private void removePrivilegeEntity(PrivilegeEntity privilegeEntity) {
-
-    PrincipalEntity principalEntity = privilegeEntity.getPrincipal();
-    if (principalEntity != null) {
-      principalEntity.removePrivilege(privilegeEntity);
-    }
-
-    privilegeDAO.remove(privilegeEntity);
-  }
-
-
   /**
    * Extract a view archive at the specified path
    *
@@ -1811,7 +1790,7 @@
   }
 
   // read a view archive
-  private void readViewArchive(ViewEntity viewDefinition,
+  private synchronized void readViewArchive(ViewEntity viewDefinition,
                                File archiveFile,
                                File extractedArchiveDirFile,
                                String serverVersion) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/SchemaManagerFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/SchemaManagerFactory.java
index ae730d1..54598c2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/SchemaManagerFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/SchemaManagerFactory.java
@@ -32,5 +32,5 @@
    *
    * @return the schema manager
    */
-  public SchemaManager getSchemaManager(DatabaseSession session);
+  SchemaManager getSchemaManager(DatabaseSession session);
 }
diff --git a/ambari-server/src/main/package/rpm/postinstall.sh b/ambari-server/src/main/package/rpm/postinstall.sh
index 021a514..1e8e0f0 100644
--- a/ambari-server/src/main/package/rpm/postinstall.sh
+++ b/ambari-server/src/main/package/rpm/postinstall.sh
@@ -17,10 +17,6 @@
 
 INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
 
-AMBARI_SERVER_KEYS_FOLDER="${ROOT}/var/lib/ambari-server/keys"
-AMBARI_SERVER_KEYS_DB_FOLDER="${ROOT}/var/lib/ambari-server/keys/db"
-AMBARI_SERVER_NEWCERTS_FOLDER="${ROOT}/var/lib/ambari-server/keys/db/newcerts"
-
 case "$1" in
   1) # Action install
     if [ -f "$INSTALL_HELPER" ]; then
@@ -34,18 +30,4 @@
   ;;
 esac
 
-if [ -d "$AMBARI_SERVER_KEYS_FOLDER" ]
-then
-    chmod 700 "$AMBARI_SERVER_KEYS_FOLDER"
-    if [ -d "$AMBARI_SERVER_KEYS_DB_FOLDER" ]
-    then
-        chmod 700 "$AMBARI_SERVER_KEYS_DB_FOLDER"
-        if [ -d "$AMBARI_SERVER_NEWCERTS_FOLDER" ]
-        then
-            chmod 700 "$AMBARI_SERVER_NEWCERTS_FOLDER"
-
-        fi
-    fi
-fi
-
 exit 0
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 87cc6c2..4f680cb 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -199,6 +199,12 @@
   start(args)
 
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def database_cleanup(args):
+  logger.info("Database cleanup.")
+  if args.silent:
+    stop(args)
+  db_cleanup(args)
 
 #
 # The Ambari Server status.
@@ -361,7 +367,7 @@
             ";".join([print_opt for print_opt, _ in optional_options]))
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def init_parser_options(parser):
+def init_action_parser(action, parser):
   parser.add_option('-k', '--service-user-name', dest="svc_user",
                     default=None,
                     help="User account under which the Ambari Server service will run")
@@ -449,31 +455,58 @@
   # -h reserved for help
 
 @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def init_parser_options(parser):
-  parser.add_option('-f', '--init-script-file', default=None,
-                    help="File with setup script")
-  parser.add_option('-r', '--drop-script-file', default=None,
-                    help="File with drop script")
-  parser.add_option('-u', '--upgrade-script-file', default=AmbariPath.get("/var/lib/"
-                                                           "ambari-server/resources/upgrade/ddl/"
-                                                           "Ambari-DDL-Postgres-UPGRADE-1.3.0.sql"),
-                    help="File with upgrade script")
-  parser.add_option('-t', '--upgrade-stack-script-file', default=AmbariPath.get("/var/lib/"
-                                                                 "ambari-server/resources/upgrade/dml/"
-                                                                 "Ambari-DML-Postgres-UPGRADE_STACK.sql"),
-                    help="File with stack upgrade script")
-  parser.add_option('-j', '--java-home', default=None,
-                    help="Use specified java_home.  Must be valid on all hosts")
-  parser.add_option("-v", "--verbose",
-                    action="store_true", dest="verbose", default=False,
-                    help="Print verbose status messages")
-  parser.add_option("-s", "--silent",
-                    action="store_true", dest="silent", default=False,
-                    help="Silently accepts default prompt values")
+def init_setup_parser_options(parser):
+  database_group = optparse.OptionGroup(parser, 'Database options (the command needs to include all options)')
+  database_group.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
+  database_group.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
+  database_group.add_option('--databaseport', default=None, help="Database port", dest="database_port")
+  database_group.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
+                            dest="database_name")
+  database_group.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
+  database_group.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
+  parser.add_option_group(database_group)
+
+  jdbc_group = optparse.OptionGroup(parser, 'JDBC options (the command needs to include all options)')
+  jdbc_group.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
+                                                            "with all required files (JDBC jar, libraries, etc.) for the " \
+                                                            "database type specified with the --jdbc-db option. " \
+                                                            "Used only with the --jdbc-db option. Archive is supported " \
+                                                            "only for the sqlanywhere database.",
+                        dest="jdbc_driver")
+  jdbc_group.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
+                                                        "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
+                        dest="jdbc_db")
+  parser.add_option_group(jdbc_group)
+
+  other_group = optparse.OptionGroup(parser, 'Other options')
+
+  other_group.add_option('-j', '--java-home', default=None,
+                         help="Use specified java_home.  Must be valid on all hosts")
+  other_group.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
+  other_group.add_option('--postgresschema', default=None, help="Postgres database schema name",
+                         dest="postgres_schema")
+  other_group.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
+  other_group.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
+                                                               "Name sid|sname", dest="sid_or_sname")
+
+  parser.add_option_group(other_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_start_parser_options(parser):
   parser.add_option('-g', '--debug', action="store_true", dest='debug', default=False,
                     help="Start ambari-server in debug mode")
   parser.add_option('-y', '--suspend-start', action="store_true", dest='suspend_start', default=False,
                     help="Freeze ambari-server Java process at startup in debug mode")
+  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
+  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
+  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_empty_parser_options(parser):
+  pass
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_sync_parser_options(parser):
   parser.add_option('--all', action="store_true", default=False, help="LDAP sync all option.  Synchronize all LDAP users and groups.",
                     dest="ldap_sync_all")
   parser.add_option('--existing', action="store_true", default=False,
@@ -482,79 +515,11 @@
                     dest="ldap_sync_users")
   parser.add_option('--groups', default=None, help="LDAP sync groups option.  Specifies the path to a CSV file of group names to be synchronized.",
                     dest="ldap_sync_groups")
-  parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
-  parser.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
-  parser.add_option('--databaseport', default=None, help="Database port", dest="database_port")
-  parser.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
-                    dest="database_name")
-  parser.add_option('--postgresschema', default=None, help="Postgres database schema name",
-                    dest="postgres_schema")
-  parser.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
-  parser.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
-  parser.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
-                                                          "Name sid|sname", dest="sid_or_sname")
-  parser.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
-  parser.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
-                                                        "with all required files(jdbc jar, libraries and etc), for the " \
-                                                        "database type specified with the --jdbc-db option. " \
-                                                        "Used only with --jdbc-db option. Archive is supported only for" \
-                                                        " sqlanywhere database." ,
-                    dest="jdbc_driver")
-  parser.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
-                                                    "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
-                    dest="jdbc_db")
-  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
-  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
-  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
-  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
-  parser.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
-  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
-  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
-  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
-                    help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")
-  parser.add_option('--stack', dest="stack_name", default=None, type="string",
-                    help="Specify stack name for the stack versions that needs to be enabled")
-  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
-  add_parser_options('--mpack',
-      default=None,
-      help="Specify the path for management pack to be installed/upgraded",
-      dest="mpack_path",
-      parser=parser,
-      required_for_actions=[INSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION]
-  )
-  add_parser_options('--mpack-name',
-      default=None,
-      help="Specify the management pack name to be uninstalled",
-      dest="mpack_name",
-      parser=parser,
-      required_for_actions=[UNINSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--purge',
-      action="store_true",
-      default=False,
-      help="Purge existing resources specified in purge-list",
-      dest="purge",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  add_parser_options('--purge-list',
-      default=default_purge_resources,
-      help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
-      dest="purge_list",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--force',
-      action="store_true",
-      default=False,
-      help="Force install management pack",
-      dest="force",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
+  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
+  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_setup_parser_options(parser):
   parser.add_option('--ldap-url', default=None, help="Primary url for LDAP", dest="ldap_url")
   parser.add_option('--ldap-secondary-url', default=None, help="Secondary url for LDAP", dest="ldap_secondary_url")
   parser.add_option('--ldap-ssl', default=None, help="Use SSL [true/false] for LDAP", dest="ldap_ssl")
@@ -570,29 +535,83 @@
   parser.add_option('--ldap-save-settings', action="store_true", default=None, help="Save without review for LDAP", dest="ldap_save_settings")
   parser.add_option('--ldap-referral', default=None, help="Referral method [follow/ignore] for LDAP", dest="ldap_referral")
   parser.add_option('--ldap-bind-anonym', default=None, help="Bind anonymously [true/false] for LDAP", dest="ldap_bind_anonym")
-  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
-  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
   parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
 
-  parser.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
-  parser.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
-  parser.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
-  parser.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force to reconfigure TrustStore if exits", dest="trust_store_reconfigure")
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_set_current_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
+  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_setup_security_parser_options(parser):
   parser.add_option('--security-option', default=None,
                     help="Setup security option (setup-https|encrypt-password|setup-kerberos-jaas|setup-truststore|import-certificate)",
                     dest="security_option")
-  parser.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
-  parser.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
-  parser.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
-  parser.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
-  parser.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
-  parser.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
-  parser.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
-  parser.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
-  parser.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
-  parser.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
 
+  https_group = optparse.OptionGroup(parser, "setup-https options")
+  https_group.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
+  https_group.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
+  https_group.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
+  https_group.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
+  parser.add_option_group(https_group)
+
+  encrypt_passwords_group = optparse.OptionGroup(parser, "encrypt-passwords options")
+  encrypt_passwords_group.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
+  encrypt_passwords_group.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
+  parser.add_option_group(encrypt_passwords_group)
+
+  setup_kerberos_jaas_group = optparse.OptionGroup(parser, "setup-kerberos-jaas options")
+  setup_kerberos_jaas_group.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
+  setup_kerberos_jaas_group.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
+  parser.add_option_group(setup_kerberos_jaas_group)
+
+  setup_truststore_group = optparse.OptionGroup(parser, "setup-truststore options, uses encrypt-passwords options if configured")
+  setup_truststore_group.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
+  setup_truststore_group.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
+  setup_truststore_group.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
+  setup_truststore_group.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force to reconfigure TrustStore if it exists", dest="trust_store_reconfigure")
+  parser.add_option_group(setup_truststore_group)
+
+  import_certificate_group = optparse.OptionGroup(parser, "import-certificate options, uses --truststore-path option")
+  import_certificate_group.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
+  import_certificate_group.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
+  parser.add_option_group(import_certificate_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_enable_stack_parser_options(parser):
+  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
+                    help="Specify the stack version that needs to be enabled. All other stack versions will be disabled")
+  parser.add_option('--stack', dest="stack_name", default=None, type="string",
+                    help="Specify the stack name for the stack versions that need to be enabled")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_db_cleanup_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_install_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path for management pack to be installed", dest="mpack_path")
+  parser.add_option('--purge', action="store_true", default=False, help="Purge existing resources specified in purge-list", dest="purge")
+  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+
+  parser.add_option('--purge-list', default=default_purge_resources,
+                    help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
+                    dest="purge_list")
+  parser.add_option('--force', action="store_true", default=False, help="Force install management pack", dest="force")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_uninstall_mpack_parser_options(parser):
+  parser.add_option('--mpack-name', default=None, help="Specify the management pack name to be uninstalled", dest="mpack_name")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_upgrade_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path for management pack to be updated", dest="mpack_path")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_kerberos_setup_parser_options(parser):
   parser.add_option('--kerberos-setup', default=None, help="Setup Kerberos Authentication", dest="kerberos_setup")
   parser.add_option('--kerberos-enabled', default=False, help="Kerberos enabled", dest="kerberos_enabled")
   parser.add_option('--kerberos-spnego-principal', default="HTTP/_HOST", help="Kerberos SPNEGO principal", dest="kerberos_spnego_principal")
@@ -759,7 +778,7 @@
         CHECK_DATABASE_ACTION: UserAction(check_database, options),
         ENABLE_STACK_ACTION: UserAction(enable_stack, options, args),
         SETUP_SSO_ACTION: UserActionRestart(setup_sso, options),
-        DB_CLEANUP_ACTION: UserAction(db_cleanup, options),
+        DB_CLEANUP_ACTION: UserAction(database_cleanup, options),
         INSTALL_MPACK_ACTION: UserAction(install_mpack, options),
         UNINSTALL_MPACK_ACTION: UserAction(uninstall_mpack, options),
         UPGRADE_MPACK_ACTION: UserAction(upgrade_mpack, options),
@@ -768,6 +787,46 @@
       }
   return action_map
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_action_parser(action, parser):
+  action_parser_map = {
+    SETUP_ACTION: init_setup_parser_options,
+    SETUP_JCE_ACTION: init_empty_parser_options,
+    START_ACTION: init_start_parser_options,
+    STOP_ACTION: init_empty_parser_options,
+    RESTART_ACTION: init_start_parser_options,
+    RESET_ACTION: init_empty_parser_options,
+    STATUS_ACTION: init_empty_parser_options,
+    UPGRADE_ACTION: init_empty_parser_options,
+    UPGRADE_STACK_ACTION: init_empty_parser_options,
+    LDAP_SETUP_ACTION: init_ldap_setup_parser_options,
+    LDAP_SYNC_ACTION: init_ldap_sync_parser_options,
+    SET_CURRENT_ACTION: init_set_current_parser_options,
+    SETUP_SECURITY_ACTION: init_setup_security_parser_options,
+    REFRESH_STACK_HASH_ACTION: init_empty_parser_options,
+    BACKUP_ACTION: init_empty_parser_options,
+    RESTORE_ACTION: init_empty_parser_options,
+    UPDATE_HOST_NAMES_ACTION: init_empty_parser_options,
+    CHECK_DATABASE_ACTION: init_empty_parser_options,
+    ENABLE_STACK_ACTION: init_enable_stack_parser_options,
+    SETUP_SSO_ACTION: init_empty_parser_options,
+    DB_CLEANUP_ACTION: init_db_cleanup_parser_options,
+    INSTALL_MPACK_ACTION: init_install_mpack_parser_options,
+    UNINSTALL_MPACK_ACTION: init_uninstall_mpack_parser_options,
+    UPGRADE_MPACK_ACTION: init_upgrade_mpack_parser_options,
+    PAM_SETUP_ACTION: init_empty_parser_options,
+    KERBEROS_SETUP_ACTION: init_kerberos_setup_parser_options,
+  }
+  parser.add_option("-v", "--verbose",
+                    action="store_true", dest="verbose", default=False,
+                    help="Print verbose status messages")
+  parser.add_option("-s", "--silent",
+                    action="store_true", dest="silent", default=False,
+                    help="Silently accepts default prompt values. For db-cleanup command, silent mode will stop ambari server.")
+  try:
+    action_parser_map[action](parser)
+  except KeyError:
+    parser.error("Invalid action: " + action)
 
 def setup_logging(logger, filename, logging_level):
   formatter = logging.Formatter(formatstr)
@@ -819,16 +878,6 @@
 
   options.warnings = []
 
-  if are_cmd_line_db_args_blank(options):
-    options.must_set_database_options = True
-  elif not are_cmd_line_db_args_valid(options):
-    parser.error('All database options should be set. Please see help for the options.')
-  else:
-    options.must_set_database_options = False
-
-  #correct database
-  fix_database_options(options, parser)
-
   if len(args) == 0:
     parser.print_help()
     parser.error("No action entered")
@@ -842,6 +891,17 @@
   except KeyError:
     parser.error("Invalid action: " + action)
 
+  if action == SETUP_ACTION:
+    if are_cmd_line_db_args_blank(options):
+      options.must_set_database_options = True
+    elif not are_cmd_line_db_args_valid(options):
+      parser.error('All database options should be set. Please see help for the options.')
+    else:
+      options.must_set_database_options = False
+
+    #correct database
+    fix_database_options(options, parser)
+
   matches = 0
   for args_number_required in action_obj.possible_args_numbers:
     matches += int(len(args) == args_number_required)
@@ -894,8 +954,9 @@
     sys.exit(options.exit_code)
 
 def mainBody():
-  parser = optparse.OptionParser(usage="usage: %prog [options] action [stack_id os]",)
-  init_parser_options(parser)
+  parser = optparse.OptionParser(usage="usage: %prog action [options]",)
+  if len(sys.argv) < 2:
+    parser.error("No action entered")
+  action = sys.argv[1]
+  init_action_parser(action, parser)
   (options, args) = parser.parse_args()
 
   # check if only silent key set
diff --git a/ambari-server/src/main/python/ambari_server/dbCleanup.py b/ambari-server/src/main/python/ambari_server/dbCleanup.py
index abc8267..6e16bc5 100644
--- a/ambari-server/src/main/python/ambari_server/dbCleanup.py
+++ b/ambari-server/src/main/python/ambari_server/dbCleanup.py
@@ -42,25 +42,29 @@
     if validate_args(options):
         return 1
 
-    db_title = get_db_type(get_ambari_properties()).title
-
-    confirmBackup = get_YN_input("Ambari Server configured for {0}. Confirm you have made a backup of the Ambari Server database [y/n]".format(
-            db_title), True)
-    if not confirmBackup:
-        print_info_msg("Ambari Server Database cleanup aborted")
-        return 0
-
     status, stateDesc = is_server_runing()
-    if status:
-        print_error_msg("The database cleanup cannot proceed while Ambari Server is running. Please shut down Ambari first.")
-        return 1
 
-    confirm = get_YN_input(
-        "Ambari server is using db type {0}. Cleanable database entries older than {1} will be cleaned up. Proceed [y/n]".format(
-            db_title, options.cleanup_from_date), True)
-    if not confirm:
-        print_info_msg("Ambari Server Database cleanup aborted")
-        return 0
+    if not options.silent:
+        db_title = get_db_type(get_ambari_properties()).title
+
+        confirmBackup = get_YN_input("Ambari Server configured for {0}. Confirm you have made a backup of the Ambari Server database [y/n]".format(
+                db_title), True)
+        if not confirmBackup:
+            print_info_msg("Ambari Server Database cleanup aborted")
+            return 0
+
+        if status:
+            print_error_msg("The database cleanup cannot proceed while Ambari Server is running. Please shut down Ambari first.")
+            return 1
+
+        confirm = get_YN_input(
+            "Ambari server is using db type {0}. Cleanable database entries older than {1} will be cleaned up. Proceed [y/n]".format(
+                db_title, options.cleanup_from_date), True)
+        if not confirm:
+            print_info_msg("Ambari Server Database cleanup aborted")
+            return 0
 
     jdk_path = get_java_exe_path()
     if jdk_path is None:
@@ -101,7 +105,6 @@
 # Database cleanup
 #
 def db_cleanup(options):
-    logger.info("Database cleanup.")
     return run_db_cleanup(options)
 
 
diff --git a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
index dba6833..b41c400 100644
--- a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
+++ b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
@@ -166,11 +166,17 @@
   def update_directory_archive(self, directory):
     """
     If hash sum for directory is not present or differs from saved value,
-    recalculates hash sum and creates directory archive
+    recalculates hash sum and creates directory archive. The archive is
+    also recreated when the archive file itself is missing, even if the
+    saved and current hash sums match.
     """
     skip_empty_directory = True
+
     cur_hash = self.count_hash_sum(directory)
     saved_hash = self.read_hash_sum(directory)
+
+    directory_archive_name = os.path.join(directory, self.ARCHIVE_NAME)
+
     if cur_hash != saved_hash:
       if not self.nozip:
         self.zip_directory(directory, skip_empty_directory)
@@ -180,6 +186,8 @@
       else:
         self.write_hash_sum(directory, cur_hash)
       pass
+    elif not os.path.isfile(directory_archive_name):
+      self.zip_directory(directory, skip_empty_directory)
 
   def count_hash_sum(self, directory):
     """
@@ -307,4 +315,3 @@
 
 if __name__ == '__main__':
   main(sys.argv)
-
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 7eba9db..4780338 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -555,6 +555,7 @@
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1/services/SMARTSENSE/package/files/view/smartsense-ambari-view-1.4.0.0.60.jar"), "644", "{0}", False),
+      (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       # Also, /etc/ambari-server/conf/password.dat
       # is generated later at store_password_file
     ]
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 6f17900..160c91d 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -522,7 +522,28 @@
     print_error_msg(err)
     raise FatalException(1, err)
 
-  services = glob.glob(os.path.join(resources_dir,"stacks","*","*","services","*"))
+  stack_services_search_path = os.path.join("stacks","*","*","services","*")
+  stack_old_dir_name = "stacks_*.old"
+  stack_backup_services_search_path = os.path.join("*","*","services","*")
+  stack_old_dir_mask = r'/stacks.*old/'
+  stack_base_service_dir = '/stacks/'
+
+  find_and_copy_custom_services(resources_dir, stack_services_search_path, stack_old_dir_name,
+                                stack_backup_services_search_path, stack_old_dir_mask, stack_base_service_dir)
+
+  common_services_search_path = os.path.join("common-services","*")
+  common_old_dir_name = "common-services_*.old"
+  common_backup_services_search_path = "*"
+  common_old_dir_mask = r'/common-services.*old'
+  common_base_service_dir = '/common-services/'
+
+  find_and_copy_custom_services(resources_dir, common_services_search_path, common_old_dir_name,
+                                common_backup_services_search_path, common_old_dir_mask, common_base_service_dir)
+
+
+def find_and_copy_custom_services(resources_dir, services_search_path, old_dir_name, backup_services_search_path,
+                                  old_dir_mask, base_service_dir):
+  services = glob.glob(os.path.join(resources_dir, services_search_path))
   managed_services = []
   for service in services:
     if os.path.isdir(service) and not os.path.basename(service) in managed_services:
@@ -530,15 +551,15 @@
   # add deprecated managed services
   managed_services.extend(["NAGIOS","GANGLIA","MAPREDUCE","WEBHCAT"])
 
-  stack_backup_dirs = glob.glob(os.path.join(resources_dir,"stacks_*.old"))
+  stack_backup_dirs = glob.glob(os.path.join(resources_dir, old_dir_name))
   if stack_backup_dirs:
     last_backup_dir = max(stack_backup_dirs, key=os.path.getctime)
-    backup_services = glob.glob(os.path.join(last_backup_dir,"*","*","services","*"))
+    backup_services = glob.glob(os.path.join(last_backup_dir, backup_services_search_path))
 
-    regex = re.compile(r'/stacks.*old/')
+    regex = re.compile(old_dir_mask)
     for backup_service in backup_services:
       backup_base_service_dir = os.path.dirname(backup_service)
-      current_base_service_dir = regex.sub('/stacks/', backup_base_service_dir)
+      current_base_service_dir = regex.sub(base_service_dir, backup_base_service_dir)
       # if services dir does not exists, we do not manage this stack
       if not os.path.exists(current_base_service_dir):
         continue
diff --git a/ambari-server/src/main/python/ambari_server/setupMpacks.py b/ambari-server/src/main/python/ambari_server/setupMpacks.py
index 04b96f5..625e428 100755
--- a/ambari-server/src/main/python/ambari_server/setupMpacks.py
+++ b/ambari-server/src/main/python/ambari_server/setupMpacks.py
@@ -29,7 +29,7 @@
 from ambari_commons.exceptions import FatalException
 from ambari_commons.inet_utils import download_file
 from ambari_commons.logging_utils import print_info_msg, print_error_msg, print_warning_msg
-from ambari_commons.os_utils import copy_file, run_os_command
+from ambari_commons.os_utils import copy_file, run_os_command, change_owner, set_file_permissions
 from ambari_server.serverConfiguration import get_ambari_properties, get_ambari_version, get_stack_location, \
   get_common_services_location, get_mpacks_staging_location, get_server_temp_location, get_extension_location, \
   get_java_exe_path, read_ambari_user, parse_properties_file, JDBC_DATABASE_PROPERTY, get_dashboard_location
@@ -243,6 +243,7 @@
   """
   if force and os.path.islink(dest_link):
     sudo.unlink(dest_link)
+
   sudo.symlink(src_path, dest_link)
   print_info_msg("Symlink: " + dest_link)
 
@@ -713,29 +714,44 @@
     _execute_hook(mpack_metadata, BEFORE_INSTALL_HOOK_NAME, tmp_root_dir)
 
   # Purge previously installed stacks and management packs
-  if options.purge and options.purge_list:
+  if not is_upgrade and options.purge and options.purge_list:
     purge_resources = options.purge_list.split(",")
     validate_purge(options, purge_resources, tmp_root_dir, mpack_metadata, replay_mode)
     purge_stacks_and_mpacks(purge_resources, replay_mode)
 
+  adjust_ownership_list = []
+  change_ownership_list = []
+
   # Get ambari mpack properties
   stack_location, extension_location, service_definitions_location, mpacks_staging_location, dashboard_location = get_mpack_properties()
   mpacks_cache_location = os.path.join(mpacks_staging_location, MPACKS_CACHE_DIRNAME)
   # Create directories
   if not os.path.exists(stack_location):
     sudo.makedir(stack_location, 0755)
+  adjust_ownership_list.append((stack_location, "0755", "{0}", True))
+  change_ownership_list.append((stack_location, "{0}", True))
   if not os.path.exists(extension_location):
     sudo.makedir(extension_location, 0755)
+  adjust_ownership_list.append((extension_location, "0755", "{0}", True))
+  change_ownership_list.append((extension_location, "{0}", True))
   if not os.path.exists(service_definitions_location):
     sudo.makedir(service_definitions_location, 0755)
+  adjust_ownership_list.append((service_definitions_location, "0755", "{0}", True))
+  change_ownership_list.append((service_definitions_location, "{0}", True))
   if not os.path.exists(mpacks_staging_location):
     sudo.makedir(mpacks_staging_location, 0755)
+  adjust_ownership_list.append((mpacks_staging_location, "0755", "{0}", True))
+  change_ownership_list.append((mpacks_staging_location, "{0}", True))
   if not os.path.exists(mpacks_cache_location):
     sudo.makedir(mpacks_cache_location, 0755)
+  adjust_ownership_list.append((mpacks_cache_location, "0755", "{0}", True))
+  change_ownership_list.append((mpacks_cache_location, "{0}", True))
   if not os.path.exists(dashboard_location):
     sudo.makedir(dashboard_location, 0755)
     sudo.makedir(os.path.join(dashboard_location, GRAFANA_DASHBOARDS_DIRNAME), 0755)
     sudo.makedir(os.path.join(dashboard_location, SERVICE_METRICS_DIRNAME), 0755)
+  adjust_ownership_list.append((dashboard_location, "0755", "{0}", True))
+  change_ownership_list.append((dashboard_location, "{0}", True))
 
   # Stage management pack (Stage at /var/lib/ambari-server/resources/mpacks/mpack_name-mpack_version)
   mpack_name = mpack_metadata.name
@@ -780,7 +796,27 @@
     else:
       print_info_msg("Unknown artifact {0} of type {1}".format(artifact_name, artifact_type))
 
-  print_info_msg("Management pack {0}-{1} successfully installed!".format(mpack_name, mpack_version))
+  ambari_user = read_ambari_user()
+
+  if ambari_user:
+    # This is required when a non-admin user is configured to set up ambari-server
+    print_info_msg("Adjusting file permissions and ownerships")
+    for pack in adjust_ownership_list:
+      path = pack[0]
+      mod = pack[1]
+      user = pack[2].format(ambari_user)
+      recursive = pack[3]
+      logger.info("Setting file permissions: {0} {1} {2} {3}".format(path, mod, user, recursive))
+      set_file_permissions(path, mod, user, recursive)
+
+    for pack in change_ownership_list:
+      path = pack[0]
+      user = pack[1].format(ambari_user)
+      recursive = pack[2]
+      logger.info("Changing ownership: {0} {1} {2}".format(path, user, recursive))
+      change_owner(path, user, recursive)
+
+  print_info_msg("Management pack {0}-{1} successfully installed! Please restart ambari-server.".format(mpack_name, mpack_version))
   return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path
 
 def _execute_hook(mpack_metadata, hook_name, base_dir):
@@ -898,9 +934,6 @@
   """
   logger.info("Upgrade mpack.")
   mpack_path = options.mpack_path
-  if options.purge:
-    print_error_msg("Purge is not supported with upgrade_mpack action!")
-    raise FatalException(-1, "Purge is not supported with upgrade_mpack action!")
 
   if not mpack_path:
     print_error_msg("Management pack not specified!")
@@ -926,7 +959,7 @@
 
   print_info_msg("Management pack {0}-{1} successfully upgraded!".format(mpack_name, mpack_version))
   if not replay_mode:
-    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, options.purge, options.purge_list, options.force, options.verbose)
+    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, False, [], options.force, options.verbose)
 
 def replay_mpack_logs():
   """
diff --git a/ambari-server/src/main/python/azuredb_create_generator.py b/ambari-server/src/main/python/azuredb_create_generator.py
new file mode 100755
index 0000000..6ceaa90
--- /dev/null
+++ b/ambari-server/src/main/python/azuredb_create_generator.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+# This script transforms SQLServer "create" SQL to idempotent SQL for AzureDB.
+# It is a filter, i.e. it expects input on stdin and prints output on stdout.
+
+import fileinput
+import re
+from textwrap import dedent
+
+flags = re.DOTALL | re.IGNORECASE
+create_table_re = re.compile("CREATE TABLE ([^\s(]+).*", flags=flags)
+create_index_re = re.compile("CREATE(?: NONCLUSTERED)? INDEX ([^ (]+).*", flags=flags)
+add_fk_const_re = re.compile("ALTER TABLE \S+ ADD CONSTRAINT (\S+) FOREIGN KEY.*", flags=flags)
+
+input_sql = "".join(fileinput.input())
+input_statements = re.split(';', input_sql)
+statements = []
+for statement in input_statements:
+  # wrap "CREATE TABLE" in IF for existence check
+  statement = re.sub(
+    create_table_re,
+    dedent('''\
+      IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('dbo.\g<1>') AND type = 'U')
+      BEGIN
+      \g<0>
+      END
+      '''),
+    statement)
+
+  # wrap "CREATE INDEX" in IF for existence check
+  statement = re.sub(
+    create_index_re,
+    dedent('''\
+      IF NOT EXISTS (SELECT 1 FROM sys.indexes WHERE name = '\g<1>')
+      BEGIN
+      \g<0>
+      END
+      '''),
+    statement)
+
+  # wrap "ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY" in IF for existence check
+  statement = re.sub(
+    add_fk_const_re,
+    dedent('''\
+      IF NOT EXISTS (SELECT 1 FROM sys.objects WHERE object_id = OBJECT_ID('\g<1>') AND type = 'F')
+      BEGIN
+      \g<0>
+      END
+      '''),
+    statement)
+
+  statements.append(statement)
+
+# find all INSERT statements, create a matching DELETE in reverse order, only one per table
+sql = "".join(statements)
+inserts = re.findall("INSERT INTO ([^ (]+)", sql, flags=re.IGNORECASE)
+tables = set()
+deletes = []
+for table in inserts:
+  if table not in tables:
+    deletes.append("  DELETE {0};".format(table))
+    tables.add(table)
+deletes.reverse()
+delete_sql = "\n".join(deletes)
+sql = re.sub("BEGIN TRANSACTION", "\g<0>\n" + delete_sql, sql, count=1)
+
+print sql
diff --git a/ambari-server/src/main/resources/.gitignore b/ambari-server/src/main/resources/.gitignore
new file mode 100644
index 0000000..173a09f
--- /dev/null
+++ b/ambari-server/src/main/resources/.gitignore
@@ -0,0 +1 @@
+Ambari-DDL-AzureDB-CREATE.sql
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 5785a9d..15670f3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -76,6 +76,7 @@
   config_data VARCHAR(3000) NOT NULL,
   config_attributes VARCHAR(3000),
   create_timestamp BIGINT NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp BIGINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_clusterconfig PRIMARY KEY (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -183,6 +184,7 @@
   desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -339,6 +341,7 @@
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BLOB NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -350,7 +353,6 @@
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info BLOB NOT NULL,
   command_params BLOB,
   host_params BLOB,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
@@ -383,7 +385,8 @@
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  ops_display_name VARCHAR(255),
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 96ef0ac..7e41399 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -96,6 +96,7 @@
   config_data LONGTEXT NOT NULL,
   config_attributes LONGTEXT,
   create_timestamp BIGINT NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp BIGINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_clusterconfig PRIMARY KEY (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -203,6 +204,7 @@
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(100) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -360,6 +362,7 @@
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info LONGBLOB,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -371,7 +374,6 @@
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info LONGBLOB,
   command_params LONGBLOB,
   host_params LONGBLOB,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
@@ -403,8 +405,9 @@
   error_log VARCHAR(255) NULL,
   structured_out LONGBLOB,
   command_detail VARCHAR(255),
+  ops_display_name VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 3396ce9..4d0274f 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -76,6 +76,7 @@
   config_data CLOB NOT NULL,
   config_attributes CLOB,
   create_timestamp NUMBER(19) NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp NUMBER(19) DEFAULT 0 NOT NULL,
   CONSTRAINT PK_clusterconfig PRIMARY KEY (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -184,6 +185,7 @@
   desired_version VARCHAR(255) DEFAULT 'UNKNOWN' NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
   recovery_enabled SMALLINT DEFAULT 0 NOT NULL,
+  repo_state VARCHAR2(255) DEFAULT 'INIT' NOT NULL,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -340,6 +342,7 @@
   start_time NUMBER(19) NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BLOB NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -351,7 +354,6 @@
   supports_auto_skip_failure NUMBER(1) DEFAULT 0 NOT NULL,
   log_info VARCHAR2(255) NULL,
   request_context VARCHAR2(255) NULL,
-  cluster_host_info BLOB NOT NULL,
   command_params BLOB,
   host_params BLOB,
   command_execution_type VARCHAR2(32) DEFAULT 'STAGE' NOT NULL,
@@ -384,7 +386,8 @@
   structured_out BLOB NULL,
   command_detail VARCHAR2(255) NULL,
   custom_command_name VARCHAR2(255) NULL,
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  ops_display_name VARCHAR2(255),
+  is_background SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index c6bfa94..cc933fa 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -76,6 +76,7 @@
   config_data TEXT NOT NULL,
   config_attributes TEXT,
   create_timestamp BIGINT NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp BIGINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_clusterconfig PRIMARY KEY (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -183,6 +184,7 @@
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -339,6 +341,7 @@
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info BYTEA NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -350,7 +353,6 @@
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info BYTEA NOT NULL,
   command_params BYTEA,
   host_params BYTEA,
   command_execution_type VARCHAR(32) DEFAULT 'STAGE' NOT NULL,
@@ -383,7 +385,8 @@
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
+  ops_display_name VARCHAR(255),
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index bbf5d3c..5fc14d4 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -75,6 +75,7 @@
   config_data TEXT NOT NULL,
   config_attributes TEXT,
   create_timestamp NUMERIC(19) NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp NUMERIC(19) NOT NULL DEFAULT 0,
   CONSTRAINT PK_clusterconfig PRIMARY KEY (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -182,6 +183,7 @@
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -338,6 +340,7 @@
   start_time NUMERIC(19) NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info IMAGE,
   CONSTRAINT PK_request PRIMARY KEY (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -349,7 +352,6 @@
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info IMAGE,
   command_params IMAGE,
   host_params IMAGE,
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
@@ -382,7 +384,8 @@
   structured_out IMAGE,
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
+  ops_display_name VARCHAR(255),
   CONSTRAINT PK_host_role_command PRIMARY KEY (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 13ab01d..12e66f9 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -89,6 +89,7 @@
   config_data VARCHAR(MAX) NOT NULL,
   config_attributes VARCHAR(MAX),
   create_timestamp BIGINT NOT NULL,
+  service_deleted SMALLINT NOT NULL DEFAULT 0,
   selected_timestamp BIGINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_clusterconfig PRIMARY KEY CLUSTERED (config_id),
   CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id),
@@ -196,6 +197,7 @@
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
+  repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
   CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
@@ -344,6 +346,7 @@
   start_time BIGINT NOT NULL,
   status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
   display_status VARCHAR(255) NOT NULL DEFAULT 'PENDING',
+  cluster_host_info VARBINARY(MAX) NOT NULL,
   CONSTRAINT PK_request PRIMARY KEY CLUSTERED (request_id),
   CONSTRAINT FK_request_schedule_id FOREIGN KEY (request_schedule_id) REFERENCES requestschedule (schedule_id));
 
@@ -355,7 +358,6 @@
   supports_auto_skip_failure SMALLINT DEFAULT 0 NOT NULL,
   log_info VARCHAR(255) NOT NULL,
   request_context VARCHAR(255),
-  cluster_host_info VARBINARY(MAX) NOT NULL,
   command_params VARBINARY(MAX),
   host_params VARBINARY(MAX),
   command_execution_type VARCHAR(32) NOT NULL DEFAULT 'STAGE',
@@ -388,7 +390,8 @@
   role_command VARCHAR(255),
   command_detail VARCHAR(255),
   custom_command_name VARCHAR(255),
-  is_background_command SMALLINT DEFAULT 0 NOT NULL,
+  is_background SMALLINT DEFAULT 0 NOT NULL,
+  ops_display_name VARCHAR(255),
   CONSTRAINT PK_host_role_command PRIMARY KEY CLUSTERED (task_id),
   CONSTRAINT FK_host_role_command_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES stage (stage_id, request_id));
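
The same handful of schema changes is applied to every dialect script above (Derby, MySQL, Oracle, Postgres, SQL Anywhere, SQL Server): clusterconfig gains service_deleted, servicecomponentdesiredstate gains repo_state, cluster_host_info moves from stage to request, and host_role_command gains ops_display_name while is_background_command is renamed to is_background. One visible inconsistency: the relocated cluster_host_info is declared NOT NULL in the Derby, Oracle, Postgres, and SQL Server scripts but left nullable in the MySQL and SQL Anywhere ones. A quick cross-dialect check can catch omissions; the sketch below is illustrative only and assumes it runs from ambari-server/src/main/resources.

# Sketch: confirm each new column name appears in every dialect's CREATE
# script. File names are real; the check itself is illustrative.
import re

DDL_FILES = [
    "Ambari-DDL-Derby-CREATE.sql", "Ambari-DDL-MySQL-CREATE.sql",
    "Ambari-DDL-Oracle-CREATE.sql", "Ambari-DDL-Postgres-CREATE.sql",
    "Ambari-DDL-SQLAnywhere-CREATE.sql", "Ambari-DDL-SQLServer-CREATE.sql",
]
NEW_COLUMNS = ["service_deleted", "repo_state", "ops_display_name", "is_background"]

for ddl_file in DDL_FILES:
    with open(ddl_file) as f:
        ddl = f.read()
    for column in NEW_COLUMNS:
        # \b keeps is_background from matching inside is_background_command
        if not re.search(r"\b%s\b" % column, ddl):
            print("%s is missing %s" % (ddl_file, column))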
diff --git a/ambari-server/src/main/resources/alerts.json b/ambari-server/src/main/resources/alerts.json
index d646401..732aae0 100644
--- a/ambari-server/src/main/resources/alerts.json
+++ b/ambari-server/src/main/resources/alerts.json
@@ -106,6 +106,18 @@
             }
           ]
         }
+      },
+      {
+        "name": "ambari_server_component_version",
+        "label": "Component Version",
+        "description": "This alert is triggered if the server detects that there is a problem with the expected and reported version of a component. The alert is suppressed automatically during an upgrade.",
+        "interval": 5,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "SERVER",
+          "class": "org.apache.ambari.server.alerts.ComponentVersionAlertRunnable"
+        }
       }
     ],
     "AMBARI_AGENT" : [
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-logsearch-conf.xml
deleted file mode 100644
index ff4f695..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-logsearch-conf.xml
+++ /dev/null
@@ -1,124 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Accumulo</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>ACCUMULO_MASTER:accumulo_master;ACCUMULO_MONITOR:accumulo_monitor;ACCUMULO_GC:accumulo_gc;ACCUMULO_TRACER:accumulo_tracer;ACCUMULO_TSERVER:accumulo_tserver</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"accumulo_gc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/gc_*.log"
-    },
-    {
-      "type":"accumulo_master",
-      "rowtype":"service",
-      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/master_*.log"
-    },
-    {
-      "type":"accumulo_monitor",
-      "rowtype":"service",
-      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/monitor_*.log"
-    },
-    {
-      "type":"accumulo_tracer",
-      "rowtype":"service",
-      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/tracer_*.log"
-    },
-    {
-      "type":"accumulo_tserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/tserver_*.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "accumulo_master"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} [%-8c{2}] %-5p: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{JAVACLASS:logger_name}\\]%{SPACE}%{LOGLEVEL:level}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "comment":"This one has one extra space after LEVEL",
-      "conditions":{
-        "fields":{
-          "type":[
-            "accumulo_gc",
-            "accumulo_monitor",
-            "accumulo_tracer",
-            "accumulo_tserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{JAVACLASS:logger_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
index 01fbce2..445c996 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
@@ -119,56 +119,6 @@
 
     # some accumulo components depend on the client, so update that too
     stack_select.select("accumulo-client", params.version)
-
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    props_value_check = {}
-    props_empty_check = ['general.kerberos.keytab',
-                         'general.kerberos.principal']
-    props_read_check = ['general.kerberos.keytab']
-    accumulo_site_expectations = build_expectations('accumulo-site',
-      props_value_check, props_empty_check, props_read_check)
-
-    accumulo_expectations = {}
-    accumulo_expectations.update(accumulo_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.conf_dir,
-      {'accumulo-site.xml': FILE_TYPE_XML})
-
-    result_issues = validate_security_config_properties(security_params, accumulo_expectations)
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ( 'accumulo-site' not in security_params
-             or 'general.kerberos.keytab' not in security_params['accumulo-site']
-             or 'general.kerberos.principal' not in security_params['accumulo-site']):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Keytab file or principal are not set property."})
-          return
-
-        cached_kinit_executor(status_params.kinit_path_local,
-          status_params.accumulo_user,
-          security_params['accumulo-site']['general.kerberos.keytab'],
-          security_params['accumulo-site']['general.kerberos.principal'],
-          status_params.hostname,
-          status_params.tmp_dir,
-          30)
-
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index 53cf002..5d21514 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -122,12 +122,21 @@
 # metrics2 properties
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+set_instanceId = "false"
+cluster_name = config["clusterName"]
+
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -144,6 +153,8 @@
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 # if accumulo is selected accumulo_tserver_hosts should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:
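
The params.py change above introduces an override path for externally managed metrics collectors: when cluster-env defines metrics_collector_external_hosts, that value wins over the collector hosts discovered from clusterHostInfo, set_instanceId flips to "true", and the port override key changes from metrics_collector_vip_port to metrics_collector_external_port. A condensed sketch of the resolution order, with a plain dict standing in for Ambari's config object:

# Sketch of the collector-resolution order above (plain dicts stand in
# for Ambari's config and default() helpers).
def resolve_collectors(config):
    cluster_env = config.get('configurations', {}).get('cluster-env', {})
    if 'metrics_collector_external_hosts' in cluster_env:
        # externally managed AMS: hosts are taken verbatim from cluster-env
        return cluster_env['metrics_collector_external_hosts'], "true"
    hosts = config.get('clusterHostInfo', {}).get('metrics_collector_hosts', [])
    return ",".join(hosts), "false"

hosts, set_instance_id = resolve_collectors({
    'configurations': {'cluster-env': {}},
    'clusterHostInfo': {'metrics_collector_hosts': ['c6401.ambari.apache.org']},
})
print(hosts, set_instance_id)  # c6401.ambari.apache.org false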
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index f7926d0..e59ba11 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -16,6 +16,9 @@
 # Poll collectors every {{metrics_report_interval}} seconds
 *.period={{metrics_collection_period}}
 
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 {% if has_metric_collector %}
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/input.config-accumulo.json.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/input.config-accumulo.json.j2
new file mode 100644
index 0000000..d093732
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/input.config-accumulo.json.j2
@@ -0,0 +1,92 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"accumulo_gc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/gc_*.log"
+    },
+    {
+      "type":"accumulo_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/master_*.log"
+    },
+    {
+      "type":"accumulo_monitor",
+      "rowtype":"service",
+      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/monitor_*.log"
+    },
+    {
+      "type":"accumulo_tracer",
+      "rowtype":"service",
+      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/tracer_*.log"
+    },
+    {
+      "type":"accumulo_tserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/accumulo-env/accumulo_log_dir', '/var/log/accumulo')}}/tserver_*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "accumulo_master"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} [%-8c{2}] %-5p: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{JAVACLASS:logger_name}\\]%{SPACE}%{LOGLEVEL:level}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "comment":"This one has one extra space after LEVEL",
+      "conditions":{
+        "fields":{
+          "type":[
+            "accumulo_gc",
+            "accumulo_monitor",
+            "accumulo_tracer",
+            "accumulo_tserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{JAVACLASS:logger_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/configuration/infra-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/configuration/infra-logsearch-conf.xml
deleted file mode 100644
index 1de8c46..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/configuration/infra-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Infra</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>INFRA_SOLR:infra_solr</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"infra_solr",
-      "rowtype":"service",
-      "path":"{{default('/configurations/infra-solr-env/infra_solr_log_dir', '/var/log/ambari-infra-solr')}}/solr.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "infra_solr"
-          ]
-        }
-      },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-      }
-    }
-  ]
-}
-  </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/infra_solr.py
index 4a8b9c0..247d82a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/infra_solr.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/infra_solr.py
@@ -59,15 +59,15 @@
       Execute(format('{solr_bindir}/solr stop -all >> {infra_solr_log}'),
               environment={'SOLR_INCLUDE': format('{infra_solr_conf}/infra-solr-env.sh')},
               user=params.infra_solr_user,
-              only_if=format("test -f {infra_solr_pidfile}")
+              only_if=format("test -f {prev_infra_solr_pidfile}")
               )
 
-      File(params.infra_solr_pidfile,
+      File(params.prev_infra_solr_pidfile,
            action="delete"
            )
     except:
       Logger.warning("Could not stop solr:" + str(sys.exc_info()[1]) + "\n Trying to kill it")
-      self.kill_process(params.infra_solr_pidfile, params.infra_solr_user, params.infra_solr_log_dir)
+      self.kill_process(params.prev_infra_solr_pidfile, params.infra_solr_user, params.infra_solr_log_dir)
 
   def status(self, env):
     import status_params
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
index acf420e..6eb3ba8 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
@@ -54,6 +54,7 @@
 infra_solr_port = status_params.infra_solr_port
 infra_solr_piddir = status_params.infra_solr_piddir
 infra_solr_pidfile = status_params.infra_solr_pidfile
+prev_infra_solr_pidfile = status_params.prev_infra_solr_pidfile
 
 user_group = config['configurations']['cluster-env']['user_group']
 fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/status_params.py
index f51a321..f91a114 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/status_params.py
@@ -23,6 +23,7 @@
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.script.script import Script
+from os import listdir, path
 
 config = Script.get_config()
 
@@ -30,5 +31,10 @@
 infra_solr_piddir = default('configurations/infra-solr-env/infra_solr_pid_dir', '/var/run/ambari-infra-solr')
 infra_solr_pidfile = format("{infra_solr_piddir}/solr-{infra_solr_port}.pid")
 
+prev_infra_solr_pidfile = ''
+if path.isdir(infra_solr_piddir):
+  for file in listdir(infra_solr_piddir):
+    prev_infra_solr_pidfile = infra_solr_piddir + '/' + file
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
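
status_params.py now records a previous Solr pid file so that stop() in infra_solr.py (above) can terminate an instance started under a different port. Note the loop keeps only whichever entry listdir returns last, so if the pid directory ever holds anything besides a single solr-<port>.pid file, prev_infra_solr_pidfile may point at the wrong file. A slightly more defensive variant, as a sketch:

# Sketch: pick the previous pid file by the solr-*.pid naming convention
# instead of taking the last directory entry.
import fnmatch
from os import listdir, path

infra_solr_piddir = '/var/run/ambari-infra-solr'  # as resolved above

prev_infra_solr_pidfile = ''
if path.isdir(infra_solr_piddir):
    pid_files = sorted(fnmatch.filter(listdir(infra_solr_piddir), 'solr-*.pid'))
    if pid_files:
        prev_infra_solr_pidfile = path.join(infra_solr_piddir, pid_files[0])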
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/templates/input.config-ambari-infra.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/templates/input.config-ambari-infra.json.j2
new file mode 100644
index 0000000..af530e7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/templates/input.config-ambari-infra.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"infra_solr",
+      "rowtype":"service",
+      "path":"{{default('/configurations/infra-solr-env/infra_solr_log_dir', '/var/log/ambari-infra-solr')}}/solr.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "infra_solr"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
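
This template replaces the deleted infra-logsearch-conf.xml: Logfeeder input configs now live as Jinja-templated JSON in the service package instead of as an Ambari config type, and the {{default(...)}} calls resolve the log directory from cluster configuration at render time, falling back to the literal default. A sketch that renders the template and confirms the output parses as JSON; the jinja2 library and the stubbed default() are assumptions for illustration, since Logfeeder's real rendering pipeline may differ.

# Sketch: render the template with a stub default() and parse the result.
import json
from jinja2 import Template

def default(prop_path, fallback):
    return fallback  # always fall back in this sketch

with open('input.config-ambari-infra.json.j2') as f:
    rendered = Template(f.read()).render(default=default)

config = json.loads(rendered)
print(sorted(config))  # ['filter', 'input']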
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index cb66537..4d33661 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -101,6 +101,14 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>timeline.metrics.host.inmemory.aggregation.jvm.arguments</name>
+    <value>-Xmx256m -Xms128m -XX:PermSize=68m</value>
+    <description>
+      Extra JVM arguments for the local aggregator, separated by spaces
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>timeline.metrics.skip.network.interfaces.patterns</name>
     <value>None</value>
     <description>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-logsearch-conf.xml
deleted file mode 100644
index 72d44db..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-logsearch-conf.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>AMS</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>METRICS_COLLECTOR:ams_collector,ams_hbase_master,ams_hbase_regionserver;METRICS_MONITOR:ams_monitor;METRICS_GRAFANA:ams_grafana</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"ams_hbase_master",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-master-*.log"
-    },
-    {
-      "type":"ams_hbase_regionserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-regionserver-*.log"
-    },
-    {
-      "type":"ams_collector",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/ambari-metrics-collector.log"
-    },
-    {
-      "type":"ams_monitor",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ams-env/metrics_monitor_log_dir', '/var/log/ambari-metrics-monitor')}}/ambari-metrics-monitor.out"
-    },
-    {
-      "type":"ams_grafana",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ams-grafana-env/metrics_grafana_log_dir', '/var/log/ambari-metrics-grafana')}}/grafana.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ams_collector"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %p %c: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ams_hbase_master",
-            "ams_hbase_regionserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ams_grafana"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{DATESTAMP:logtime})",
-      "message_pattern":"(?m)^%{DATESTAMP:logtime}%{SPACE}\\[%{WORD:level}\\]%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy/MM/dd HH:mm:ss"
-          }
-         },
-        "level":[
-          {
-            "map_fieldvalue":{
-              "pre_value":"I",
-              "post_value":"INFO"
-            }
-          },
-          {
-            "map_fieldvalue":{
-              "pre_value":"W",
-              "post_value":"WARN"
-            }
-          },
-          {
-            "map_fieldvalue":{
-              "pre_value":"D",
-              "post_value":"DEBUG"
-             }
-           },
-           {
-             "map_fieldvalue":{
-               "pre_value":"E",
-               "post_value":"ERROR"
-             }
-           },
-           {
-             "map_fieldvalue":{
-               "pre_value":"F",
-               "post_value":"FATAL"
-             }
-           }
-         ]
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ams_monitor"
-          ]
-        }
-      },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{JAVAFILE:file}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       },
-      "level":[
-        {
-          "map_fieldvalue":{
-            "pre_value":"WARNING",
-            "post_value":"WARN"
-          }
-        }
-      ]
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 8e1671e..1b085f6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -787,4 +787,15 @@
     <value>{{cluster_zookeeper_clientPort}}</value>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation</name>
+    <value>false</value>
+    <description>if set to "true" host metrics will be aggregated in memory on each host</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation.port</name>
+    <value>61888</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
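
ams-site gains the switch and port for host-level in-memory aggregation. One wrinkle worth noting: the fallback seen earlier in params.py defaults timeline.metrics.host.inmemory.aggregation to True when the property is absent, while the ams-site default shipped here is false; once the property exists in ams-site, its value wins. A sketch of that lookup-with-fallback, using a simplified stand-in for the default() helper (the real one reads from Script.get_config() and takes no config argument):

# Sketch: simplified stand-in for the default() lookup used in params.py.
def default(prop_path, fallback, config):
    node = config
    for part in prop_path.strip('/').split('/'):
        if not isinstance(node, dict) or part not in node:
            return fallback
        node = node[part]
    return node

config = {'configurations': {'ams-site': {
    'timeline.metrics.host.inmemory.aggregation': 'false',
    'timeline.metrics.host.inmemory.aggregation.port': '61888',
}}}

agg_path = '/configurations/ams-site/timeline.metrics.host.inmemory.aggregation'
print(default(agg_path, True, config))             # 'false' (ams-site value wins)
print(default(agg_path + '.port', 61888, config))  # '61888'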
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 740a91a..9031b46 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -93,6 +93,9 @@
               <primary>true</primary>
             </log>
           </logs>
+          <configuration-dependencies>
+            <config-type>ams-site</config-type>
+          </configuration-dependencies>
         </component>
 
         <component>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-misc.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-misc.json
index 6f53b84..a58a87a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-misc.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-misc.json
@@ -1636,6 +1636,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1653,7 +1668,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-regionservers.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-regionservers.json
index c7a5777..3184357 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-regionservers.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hbase-regionservers.json
@@ -9019,6 +9019,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""},
@@ -9035,7 +9050,7 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-datanodes.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-datanodes.json
index ffcddea..6e9777c 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-datanodes.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-datanodes.json
@@ -1146,6 +1146,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1162,7 +1177,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-home.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-home.json
index 09929ec..47005e2 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-home.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-home.json
@@ -697,6 +697,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -713,7 +728,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-namenodes.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-namenodes.json
index ebf7a4c..4c03abe 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-namenodes.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-namenodes.json
@@ -1886,6 +1886,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1902,7 +1917,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-topn.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-topn.json
index 90fd37c..500d75a 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-topn.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hdfs-topn.json
@@ -802,6 +802,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -818,7 +833,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hivemetastore.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hivemetastore.json
index caaa86a..dd32192 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hivemetastore.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hivemetastore.json
@@ -526,6 +526,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -542,7 +557,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query",
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hiverserver2.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hiverserver2.json
index baca5bb..91ab4ae 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hiverserver2.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-hive-hiverserver2.json
@@ -732,6 +732,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -748,7 +763,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query",
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-hosts.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-hosts.json
index 533a169..02b61e5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-hosts.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-kafka-hosts.json
@@ -1832,6 +1832,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1848,7 +1863,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-daemons.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-daemons.json
index 849f296..32c957b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-daemons.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-daemons.json
@@ -2114,6 +2114,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": "",
@@ -2132,7 +2147,7 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "type": "query"
       }
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-heatmaps.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-heatmaps.json
index ab2434f..1ed770e 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-heatmaps.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-llapdaemon-heatmaps.json
@@ -304,6 +304,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -321,7 +336,7 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-cores-dashboard.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-cores-dashboard.json
index 9b79663..1cd603f 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-cores-dashboard.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-cores-dashboard.json
@@ -1554,6 +1554,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
         },
         "datasource": null,
@@ -1564,7 +1579,8 @@
         "options": [
 
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-hosts-dashboard.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-hosts-dashboard.json
index 35d2bd7..58d3d48 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-hosts-dashboard.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-solr-hosts-dashboard.json
@@ -242,6 +242,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
         },
         "datasource": null,
@@ -252,7 +267,8 @@
         "options": [
 
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-nodemanagers.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-nodemanagers.json
index abbf221..52999a2 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-nodemanagers.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-nodemanagers.json
@@ -1652,6 +1652,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1668,7 +1683,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-resourcemanagers.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-resourcemanagers.json
index b861862..00a68d7 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-resourcemanagers.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/HDP/grafana-yarn-resourcemanagers.json
@@ -898,6 +898,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -914,7 +929,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-database.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-database.json
index 229db83..a472dba 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-database.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-database.json
@@ -880,6 +880,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "datasource": null,
         "includeAll": false,
         "multi": false,
@@ -887,8 +902,8 @@
         "name": "hosts",
         "options": [
         ],
-        "query": "hosts",
-        "refresh": true,
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "type": "query"
       }
     ]
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-topn.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-topn.json
index 2c98e8d..f919f27 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-topn.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server-topn.json
@@ -300,6 +300,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "datasource": null,
         "includeAll": false,
         "multi": false,
@@ -307,7 +322,8 @@
         "name": "hosts",
         "options": [
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "type": "query"
       },
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server.json
index c458931..5453c85 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ambari-server.json
@@ -705,6 +705,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "datasource": null,
         "includeAll": false,
         "multi": false,
@@ -712,7 +727,8 @@
         "name": "hosts",
         "options": [
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "type": "query"
       }
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-misc.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-misc.json
index 4820ae3..a79c715 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-misc.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-misc.json
@@ -1636,6 +1636,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -1653,7 +1668,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-regionservers.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-regionservers.json
index 4ec784e..b822521 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-regionservers.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-ams-hbase-regionservers.json
@@ -9019,6 +9019,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -9036,7 +9051,7 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-system-servers.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-system-servers.json
index 4776ff0..499e284 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-system-servers.json
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/files/grafana-dashboards/default/grafana-system-servers.json
@@ -2437,6 +2437,21 @@
       },
       {
         "allFormat": "glob",
+        "current": null,
+        "datasource": null,
+        "includeAll": false,
+        "label": "Cluster",
+        "multi": false,
+        "multiFormat": "glob",
+        "name": "cluster",
+        "options": [],
+        "query": "cluster",
+        "refresh": true,
+        "regex": "",
+        "type": "query"
+      },
+      {
+        "allFormat": "glob",
         "current": {
           "text": "All",
           "value": ""
@@ -2453,7 +2468,8 @@
             "selected": true
           }
         ],
-        "query": "hosts",
+        "label": "Hosts",
+        "query": "hosts.$cluster",
         "refresh": true,
         "regex": "",
         "type": "query"
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index a929847..f49d47d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -163,6 +163,20 @@
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(os.path.join(params.ams_monitor_conf_dir, "log4j.properties"),
+           owner=params.ams_user,
+           content=params.log4j_props
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     TemplateConfig(
       os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
       owner=params.ams_user,
@@ -366,6 +380,22 @@
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.ams_user,
+           content=InlineTemplate(params.log4j_props)
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
             )
 
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
index 7073de6..fc2576d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
@@ -77,71 +77,7 @@
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class AmsCollectorDefault(AmsCollector):
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                         "hbase.security.authorization": "true"}
-
-    props_empty_check = ["hbase.zookeeper.property.authProvider.1",
-                         "hbase.master.keytab.file",
-                         "hbase.master.kerberos.principal",
-                         "hbase.regionserver.keytab.file",
-                         "hbase.regionserver.kerberos.principal"
-                         ]
-    props_read_check = ['hbase.master.keytab.file', 'hbase.regionserver.keytab.file']
-    ams_hbase_site_expectations = build_expectations('hbase-site', props_value_check,
-                                                     props_empty_check,
-                                                     props_read_check)
-
-    expectations = {}
-    expectations.update(ams_hbase_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.ams_hbase_conf_dir,
-                                                 {'hbase-site.xml': FILE_TYPE_XML})
-
-    # In case of blueprint deployment security_status might be called before AMS collector is installed.
-    if ('hbase-site' not in security_params or 'hbase.cluster.distributed' not in security_params['hbase-site']) :
-      self.put_structured_out({"securityState": "UNKNOWN"})
-      return
-
-    is_hbase_distributed = security_params['hbase-site']['hbase.cluster.distributed']
-    # for embedded mode, when HBase is backed by file, security state is SECURED_KERBEROS by definition when cluster is secured
-    if status_params.security_enabled and not is_hbase_distributed:
-      self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      return
-
-    result_issues = validate_security_config_properties(security_params, expectations)
-
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ('hbase-site' not in security_params or
-                'hbase.master.keytab.file' not in security_params['hbase-site'] or
-                'hbase.master.kerberos.principal' not in security_params['hbase-site']):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Keytab file or principal are not set property."})
-          return
-
-        cached_kinit_executor(status_params.kinit_path_local,
-                              status_params.hbase_user,
-                              security_params['hbase-site']['hbase.master.keytab.file'],
-                              security_params['hbase-site']['hbase.master.kerberos.principal'],
-                              status_params.hostname,
-                              status_params.tmp_dir)
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (
-          cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
+  pass
 
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 06a4518..3036857 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -23,6 +23,7 @@
 from service_check import post_metrics_to_collector
 from resource_management.core.logger import Logger
 from resource_management.core.base import Fail
+from resource_management.libraries.script.script import Script
 from resource_management import Template
 from collections import namedtuple
 from urlparse import urlparse
@@ -54,9 +55,13 @@
 
   for i in xrange(0, GRAFANA_CONNECT_TRIES):
     try:
-      conn = network.get_http_connection(server.host,
-                                         int(server.port),
-                                         grafana_https_enabled, ca_certs)
+      conn = network.get_http_connection(
+        server.host,
+        int(server.port),
+        grafana_https_enabled,
+        ca_certs,
+        ssl_version=Script.get_force_https_protocol_value()
+      )
 
       userAndPass = b64encode('{0}:{1}'.format(server.user, server.password))
       headers = { 'Authorization' : 'Basic %s' %  userAndPass }
@@ -94,7 +99,13 @@
 
   for i in xrange(0, GRAFANA_CONNECT_TRIES):
     try:
-      conn = network.get_http_connection(server.host, int(server.port), grafana_https_enabled, ca_certs)
+      conn = network.get_http_connection(
+        server.host,
+        int(server.port),
+        grafana_https_enabled,
+        ca_certs,
+        ssl_version=Script.get_force_https_protocol_value()
+      )
       conn.request("PUT", url + "/" + str(id), payload, headers)
       response = conn.getresponse()
       data = response.read()
@@ -130,9 +141,12 @@
   for i in xrange(0, GRAFANA_CONNECT_TRIES):
     try:
       Logger.info("Connecting (POST) to %s:%s%s" % (server.host, server.port, url))
-      conn = network.get_http_connection(server.host,
-                                         int(server.port),
-                                         grafana_https_enabled, ca_certs)
+      conn = network.get_http_connection(
+        server.host,
+        int(server.port),
+        grafana_https_enabled, ca_certs,
+        ssl_version=Script.get_force_https_protocol_value()
+      )
       
       conn.request("POST", url, payload, headers)
 
@@ -171,9 +185,12 @@
 
   for i in xrange(0, GRAFANA_CONNECT_TRIES):
     try:
-      conn = network.get_http_connection(server.host,
-                                         int(server.port),
-                                         grafana_https_enabled, ca_certs)
+      conn = network.get_http_connection(
+        server.host,
+        int(server.port),
+        grafana_https_enabled, ca_certs,
+        ssl_version=Script.get_force_https_protocol_value()
+      )
 
       userAndPass = b64encode('{0}:{1}'.format(server.user, server.password))
       headers = { 'Authorization' : 'Basic %s' %  userAndPass }
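
All four connect sites in this file now pass the same fifth argument, `ssl_version=Script.get_force_https_protocol_value()`, so Grafana requests honor the forced TLS protocol setting instead of the connection default. A small wrapper would keep that lookup in one place; this is a hypothetical refactoring sketch built only from the calls visible above, not code from the patch:

```python
from resource_management.libraries.script.script import Script
import network  # the AMS helper module already imported by this file

def grafana_connection(server, grafana_https_enabled, ca_certs):
    # One place to apply the forced TLS protocol to every Grafana call.
    return network.get_http_connection(
        server.host,
        int(server.port),
        grafana_https_enabled,
        ca_certs,
        ssl_version=Script.get_force_https_protocol_value()
    )
```
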
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 919f26d..b8c14f4 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -66,17 +66,22 @@
 if not is_ams_distributed and len(ams_collector_list) > 1:
   embedded_mode_multiple_instances = True
 
+set_instanceId = "false"
+cluster_name = config["clusterName"]
 if 'cluster-env' in config['configurations'] and \
-    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_vip_host']
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
 metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
 
 random_metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
 
 if 'cluster-env' in config['configurations'] and \
-    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-  metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+    'metrics_collector_external_port' in config['configurations']['cluster-env']:
+  metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
 else:
   metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
   if metric_collector_web_address.find(':') != -1:
@@ -219,6 +224,11 @@
 master_heapsize = check_append_heap_property(str(master_heapsize), "m")
 regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+host_in_memory_aggregation_jvm_arguments = default("/configurations/ams-env/timeline.metrics.host.inmemory.aggregation.jvm.arguments",
+                                                   "-Xmx256m -Xms128m -XX:PermSize=68m")
+
 regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
 if regionserver_xmn_max:
   regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
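
Restated on its own, the host-resolution logic this hunk introduces: an externally managed collector (the renamed `metrics_collector_external_hosts` key, formerly `metrics_collector_vip_host`) takes precedence and turns on instance tagging; otherwise the hosts come from `clusterHostInfo`. A standalone illustration with hypothetical configs:

```python
# Illustrative only: how the collector-host resolution above behaves for two
# hypothetical configs (names mirror the params.py logic in this hunk).
def resolve_collector_hosts(config):
    cluster_env = config.get('configurations', {}).get('cluster-env', {})
    if 'metrics_collector_external_hosts' in cluster_env:
        # An external (VIP / standalone) collector: tag metrics with instanceId.
        return cluster_env['metrics_collector_external_hosts'], "true"
    hosts = config.get('clusterHostInfo', {}).get('metrics_collector_hosts', [])
    return ",".join(hosts), "false"

print(resolve_collector_hosts(
    {"configurations": {"cluster-env":
        {"metrics_collector_external_hosts": "ext1.example.com"}}}
))  # -> ('ext1.example.com', 'true')
print(resolve_collector_hosts(
    {"configurations": {"cluster-env": {}},
     "clusterHostInfo": {"metrics_collector_hosts":
        ["c1.example.com", "c2.example.com"]}}
))  # -> ('c1.example.com,c2.example.com', 'false')
```
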
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
index e753958..2b3dfa9 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
@@ -91,10 +91,13 @@
                                                    params.metric_collector_port,
                                                    self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
       for i in xrange(0, self.AMS_READ_TRIES):
-        conn = network.get_http_connection(metric_collector_host,
-                                           int(params.metric_collector_port),
-                                           params.metric_collector_https_enabled,
-                                           ca_certs)
+        conn = network.get_http_connection(
+          metric_collector_host,
+          int(params.metric_collector_port),
+          params.metric_collector_https_enabled,
+          ca_certs,
+          ssl_version=Script.get_force_https_protocol_value()
+        )
         conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
         response = conn.getresponse()
         Logger.info("Http response for host %s : %s %s" % (metric_collector_host, response.status, response.reason))
@@ -164,10 +167,13 @@
       Logger.info("Connecting (POST) to %s:%s%s" % (metric_collector_host,
                                                     metric_collector_port,
                                                     ams_metrics_post_url))
-      conn = network.get_http_connection(metric_collector_host,
-                                         int(metric_collector_port),
-                                         metric_collector_https_enabled,
-                                         ca_certs)
+      conn = network.get_http_connection(
+        metric_collector_host,
+        int(metric_collector_port),
+        metric_collector_https_enabled,
+        ca_certs,
+        ssl_version=Script.get_force_https_protocol_value()
+      )
       conn.request("POST", ams_metrics_post_url, metric_json, headers)
 
       response = conn.getresponse()
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index 8c6f86f..978b795 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -58,6 +58,9 @@
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
 *.sink.timeline.slave.host.name={{hostname}}
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/input.config-ambari-metrics.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/input.config-ambari-metrics.json.j2
new file mode 100644
index 0000000..ef823b2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/input.config-ambari-metrics.json.j2
@@ -0,0 +1,169 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"ams_hbase_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-master-*.log"
+    },
+    {
+      "type":"ams_hbase_regionserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-regionserver-*.log"
+    },
+    {
+      "type":"ams_collector",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/ambari-metrics-collector.log"
+    },
+    {
+      "type":"ams_monitor",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ams-env/metrics_monitor_log_dir', '/var/log/ambari-metrics-monitor')}}/ambari-metrics-monitor.out"
+    },
+    {
+      "type":"ams_grafana",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ams-grafana-env/metrics_grafana_log_dir', '/var/log/ambari-metrics-grafana')}}/grafana.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ams_collector"
+          ]
+         }
+      },
+      "log4j_format":"%d{ISO8601} %p %c: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ams_hbase_master",
+            "ams_hbase_regionserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ams_grafana"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{DATESTAMP:logtime})",
+      "message_pattern":"(?m)^%{DATESTAMP:logtime}%{SPACE}\\[%{WORD:level}\\]%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy/MM/dd HH:mm:ss"
+          }
+        },
+        "level":[
+          {
+            "map_fieldvalue":{
+              "pre_value":"I",
+              "post_value":"INFO"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"W",
+              "post_value":"WARN"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"D",
+              "post_value":"DEBUG"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"E",
+              "post_value":"ERROR"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"F",
+              "post_value":"FATAL"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ams_monitor"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{JAVAFILE:file}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      },
+      "level":[
+        {
+          "map_fieldvalue":{
+            "pre_value":"WARNING",
+            "post_value":"WARN"
+          }
+        }
+      ]
+    }
+  ]
+ }
\ No newline at end of file
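
(The `level` value map in the `ams_monitor` filter belongs inside `post_map_values`, matching the `ams_grafana` filter above; it is nested that way here.) The grok patterns in this template drive field extraction; for instance, the `ams_collector` filter pulls `logtime`, `level`, `logger_name`, and `log_message` out of each line. A rough Python translation of that pattern into a plain regex, with a made-up sample line (the real grok definitions are broader than this sketch):

```python
import re

LINE = "2017-05-04 10:15:30,123 INFO org.apache.Foo: service started"
PATTERN = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+"
    r"(?P<logger_name>[\w.$]+):\s+"
    r"(?P<log_message>.*)$"
)
print(PATTERN.match(LINE).groupdict())
# {'logtime': '2017-05-04 10:15:30,123', 'level': 'INFO',
#  'logger_name': 'org.apache.Foo', 'log_message': 'service started'}
```
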
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index 1505f9b..b7dee50 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -25,6 +25,8 @@
 skip_disk_patterns = {{skip_disk_metrics_patterns}}
 skip_virtual_interfaces = {{skip_virtual_interfaces}}
 skip_network_interfaces_patterns = {{skip_network_interfaces_patterns}}
+instanceId={{cluster_name}}
+set.instanceId={{set_instanceId}}
 
 [emitter]
 send_interval = {{metrics_report_interval}}
@@ -36,3 +38,10 @@
 failover_strategy_blacklisted_interval_seconds = {{failover_strategy_blacklisted_interval_seconds}}
 port = {{metric_collector_port}}
 https_enabled = {{metric_collector_https_enabled}}
+
+[aggregation]
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+java_home = {{java64_home}}
+jvm_arguments = {{host_in_memory_aggregation_jvm_arguments}}
+ams_monitor_log_dir = {{ams_monitor_log_dir}}
\ No newline at end of file
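
As a sanity check, the rendered `[aggregation]` section is ordinary INI that the monitor can read back; the values below are hypothetical stand-ins for the Jinja variables:

```python
# Illustrative check (not part of the patch): parse a rendered
# metric_monitor.ini fragment and read back the new [aggregation] section.
import configparser

RENDERED = """
[aggregation]
host_in_memory_aggregation = True
host_in_memory_aggregation_port = 61888
java_home = /usr/jdk64/jdk1.8.0_112
jvm_arguments = -Xmx256m -Xms128m -XX:PermSize=68m
ams_monitor_log_dir = /var/log/ambari-metrics-monitor
"""

cfg = configparser.ConfigParser()
cfg.read_string(RENDERED)
print(cfg.getboolean("aggregation", "host_in_memory_aggregation"))  # True
print(cfg.getint("aggregation", "host_in_memory_aggregation_port"))  # 61888
```
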
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-logsearch-conf.xml
deleted file mode 100644
index 71a08fb..0000000
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/configuration/atlas-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Atlas</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>ATLAS_SERVER:atlas_app</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"atlas_app",
-      "rowtype":"service",
-      "path":"{{default('/configurations/atlas-env/metadata_log_dir', '/var/log/atlas')}}/application.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "atlas_app"
-          ]
-         }
-       },
-      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
index c25445c..36c4598 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
@@ -18,6 +18,7 @@
 
 """
 import os
+import hashlib
 
 from resource_management import Package
 from resource_management import StackFeature
@@ -31,11 +32,13 @@
 from resource_management.libraries.resources.properties_file import PropertiesFile
 from resource_management.libraries.resources.template_config import TemplateConfig
 from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
 
 
 def metadata(type='server'):
     import params
-    
+
     # Needed by both Server and Client
     Directory(params.conf_dir,
               mode=0755,
@@ -97,7 +100,14 @@
            mode=0755,
            content=InlineTemplate(params.metadata_env_content)
       )
- 
+
+      if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
+        psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
+        ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
+            properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
+            owner = params.metadata_user
+        )
+
       files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
       for file in files_to_chown:
         if os.path.exists(file):
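
The added block seeds `users-credentials.properties` with a line of the form `<user>=ROLE_ADMIN::<sha256(password)>` whenever both credentials are set. A standalone illustration of the value being written (Python 3, so the password must be encoded first; the credentials here are hypothetical):

```python
import hashlib

atlas_admin_username = "admin"   # hypothetical credentials for illustration
atlas_admin_password = "admin"

digest = hashlib.sha256(atlas_admin_password.encode("utf-8")).hexdigest()
print("%s=ROLE_ADMIN::%s" % (atlas_admin_username, digest))
# admin=ROLE_ADMIN::8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
```
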
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
index 3c62243..1ef77cf 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
@@ -168,84 +168,6 @@
     env.set_params(status_params)
     check_process_status(status_params.pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    file_name_key = 'applicaton'
-    props_value_check = {'atlas.authentication.method': 'kerberos',
-                         'atlas.http.authentication.enabled': 'true',
-                         'atlas.http.authentication.type': 'kerberos'}
-    props_empty_check = ['atlas.authentication.principal',
-                         'atlas.authentication.keytab',
-                         'atlas.http.authentication.kerberos.principal',
-                         'atlas.http.authentication.kerberos.keytab']
-    props_read_check = ['atlas.authentication.keytab',
-                        'atlas.http.authentication.kerberos.keytab']
-
-    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, status_params.version_for_stack_feature_checks):
-      file_name_key = 'atlas-application'
-      props_value_check = {'atlas.authentication.method.kerberos': 'true',
-                           'atlas.solr.kerberos.enable': 'true'}
-      props_empty_check = ['atlas.authentication.principal',
-                           'atlas.authentication.keytab',
-                           'atlas.authentication.method.kerberos.principal',
-                           'atlas.authentication.method.kerberos.keytab']
-      props_read_check = ['atlas.authentication.keytab',
-                          'atlas.authentication.method.kerberos.keytab']
-
-    atlas_site_expectations = build_expectations(file_name_key,
-                                                 props_value_check,
-                                                 props_empty_check,
-                                                 props_read_check)
-
-    atlas_expectations = {}
-    atlas_expectations.update(atlas_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                 {status_params.conf_file: FILE_TYPE_PROPERTIES})
-    result_issues = validate_security_config_properties(security_params, atlas_expectations)
-
-    if not result_issues:  # If all validations passed successfully
-      try:
-        # Double check the dict before calling execute
-        if ( file_name_key not in security_params
-             or 'atlas.authentication.keytab' not in security_params[file_name_key]
-             or 'atlas.authentication.principal' not in security_params[file_name_key]):
-          self.put_structured_out({"securityState": "UNSECURED"})
-          self.put_structured_out(
-            {"securityIssuesFound": "Atlas service keytab file or principal are not set property."})
-          return
-
-        if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, status_params.version_for_stack_feature_checks):
-          if ( file_name_key not in security_params
-               or 'atlas.authentication.method.kerberos.keytab' not in security_params[file_name_key]
-               or 'atlas.authentication.method.kerberos.principal' not in security_params[file_name_key]):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Method Authentication keytab file or principal are not set property."})
-            return
-        else:
-          if ( file_name_key not in security_params
-               or 'atlas.http.authentication.kerberos.keytab' not in security_params[file_name_key]
-               or 'atlas.http.authentication.kerberos.principal' not in security_params[file_name_key]):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "HTTP Authentication keytab file or principal are not set property."})
-            return
-
-        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      issues = []
-      for cf in result_issues:
-        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
 
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index bf4848b..d26df33 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -412,3 +412,6 @@
     'type': 'atlas',
     }
 # ranger atlas plugin section end
+# Atlas admin login username and password
+atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
+atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/input.config-atlas.json.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/input.config-atlas.json.j2
new file mode 100644
index 0000000..2d977b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/templates/input.config-atlas.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"atlas_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/atlas-env/metadata_log_dir', '/var/log/atlas')}}/application.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "atlas_app"
+          ]
+        }
+      },
+      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
index 70af02c..f34d8be 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/configuration/application-properties.xml
@@ -24,7 +24,7 @@
   <!-- Misc properties -->
   <property>
     <name>atlas.audit.hbase.zookeeper.quorum</name>
-    <value/>
+    <value>localhost</value>
     <description/>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -38,7 +38,7 @@
   </property>
   <property>
     <name>atlas.graph.storage.hostname</name>
-    <value/>
+    <value>localhost</value>
     <description/>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -66,7 +66,7 @@
   </property>
   <property>
     <name>atlas.graph.index.search.solr.zookeeper-url</name>
-    <value/>
+    <value>localhost:2181/infra-solr</value>
     <description>The ZooKeeper quorum setup for Solr as comma separated value.</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -145,7 +145,7 @@
   </property>
   <property>
     <name>atlas.kafka.bootstrap.servers</name>
-    <value/>
+    <value>localhost:6667</value>
     <description>Comma separated list of Kafka broker endpoints in host:port form</description>
     <depends-on>
      <property>
@@ -157,7 +157,7 @@
   </property>
   <property>
     <name>atlas.kafka.zookeeper.connect</name>
-    <value/>
+    <value>localhost:2181</value>
     <description>Comma separated list of servers forming Zookeeper quorum used by Kafka.</description>
     <on-ambari-upgrade add="false"/>
   </property>
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/role_command_order.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/role_command_order.json
index 372cd21..4d66dfc 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.2.5/role_command_order.json
@@ -2,6 +2,6 @@
   "general_deps" : {
     "_comment" : "dependencies for ATLAS",
     "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
-    "ATLAS_SERVER-START": ["KAFKA_BROKER-START", "LOGSEARCH_SOLR-START", "HBASE_MASTER-START", "HBASE_REGIONSERVER-START"]
+    "ATLAS_SERVER-START": ["KAFKA_BROKER-START", "INFRA_SOLR-START", "HBASE_MASTER-START", "HBASE_REGIONSERVER-START"]
   }
 }
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/alerts.json
new file mode 100644
index 0000000..8a2a415
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/alerts.json
@@ -0,0 +1,39 @@
+{
+  "ATLAS": {
+    "service": [],
+    "ATLAS_SERVER": [
+      {
+        "name": "metadata_server_webui",
+        "label": "Metadata Server Web UI",
+        "description": "This host-level alert is triggered if the Metadata Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{application-properties/atlas.server.bind.address}}:{{application-properties/atlas.server.http.port}}/api/atlas/admin/status",
+            "https": "{{application-properties/atlas.server.bind.address}}:{{application-properties/atlas.server.https.port}}/api/atlas/admin/status",
+            "https_property": "{{application-properties/atlas.enableTLS}}",
+            "https_property_value": "true",
+            "default_port": 21000,
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ]
+  }
+}
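
The WEB alert above builds its probe URL from {{config-type/property}} placeholders, switching to the https URI when atlas.enableTLS is "true" and falling back to default_port when a piece is missing. A hedged sketch of that substitution (the real work happens in Ambari's alert framework; the config values here are invented):

    import re

    configs = {  # hypothetical current configuration values
        "application-properties/atlas.server.bind.address": "atlas1.example.com",
        "application-properties/atlas.server.http.port": "21000",
        "application-properties/atlas.enableTLS": "false",
    }

    def resolve(template):
        return re.sub(r"\{\{([^}]+)\}\}",
                      lambda m: configs.get(m.group(1), ""), template)

    uri = resolve("{{application-properties/atlas.server.bind.address}}"
                  ":{{application-properties/atlas.server.http.port}}"
                  "/api/atlas/admin/status")
    print(uri)  # atlas1.example.com:21000/api/atlas/admin/status
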
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/application-properties.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/application-properties.xml
new file mode 100644
index 0000000..36a2b55
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/application-properties.xml
@@ -0,0 +1,546 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+  <property>
+    <name>atlas.enableTLS</name>
+    <value>false</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.authentication.principal</name>
+    <value>atlas</value>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.keytab</name>
+    <value>/etc/security/keytabs/atlas.service.keytab</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.server.bind.address</name>
+    <value>localhost</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.notification.embedded</name>
+    <value>false</value>
+    <description>Indicates whether or not the notification service should be embedded.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- atlas.cluster.name is also part of Atlas Hooks -->
+  <property>
+    <name>atlas.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>The cluster name.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.server.http.port</name>
+    <value>21000</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.server.https.port</name>
+    <value>21443</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.audit.hbase.tablename</name>
+    <value>ATLAS_ENTITY_AUDIT_EVENTS</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.audit.zookeeper.session.timeout.ms</name>
+    <value>60000</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.audit.hbase.zookeeper.quorum</name>
+    <value/>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Storage properties -->
+  <property>
+    <name>atlas.graph.storage.hbase.table</name>
+    <value>atlas_titan</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.graph.storage.hostname</name>
+    <value/>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- Overridden: previous value was berkeleyje -->
+  <property>
+    <name>atlas.graph.storage.backend</name>
+    <value>hbase</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Graph properties -->
+  <!-- Overridden: previous value was elasticsearch -->
+  <property>
+    <name>atlas.graph.index.search.backend</name>
+    <value>solr5</value>
+    <description>The Atlas indexing backend (e.g. solr5).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.graph.index.search.solr.mode</name>
+    <value>cloud</value>
+    <description>The Solr mode (e.g. cloud).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.graph.index.search.solr.zookeeper-url</name>
+    <value/>
+    <description>The ZooKeeper quorum setup for Solr as comma separated value.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Authentication properties -->
+  <property>
+    <name>atlas.authentication.method.kerberos</name>
+    <value>false</value>
+    <description>Indicates whether or not Kerberos is enabled.</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>atlas.authentication.method.file</name>
+    <display-name>Enable File Authentication</display-name>
+    <value>true</value>
+    <description>Indicates whether or not file based authentication is enabled.</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap</name>
+    <display-name>Enable LDAP Authentication</display-name>
+    <value>false</value>
+    <description>Indicates whether or not LDAP authentication is enabled.</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>atlas.authentication.method.file.filename</name>
+    <value>{{conf_dir}}/users-credentials.properties</value>
+    <description>File path for file based login.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.auth.policy.file</name>
+    <value>{{conf_dir}}/policy-store.txt</value>
+    <description>Path for the Atlas policy file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Start: Shared Atlas Hooks that are also written out to configs for Falcon, Storm, Hive, and Sqoop.
+  There are several more properties for when Atlas is Kerberized.
+  Note that atlas.cluster.name is inherited.
+  -->
+  <!-- This property is constructed from the protocol, server, and port and generated by Stack Advisor.
+  Hence, it should be visible but not editable.
+  -->
+  <property>
+    <name>atlas.rest.address</name>
+    <value>http://localhost:21000</value>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <editable-only-at-install>false</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.bootstrap.servers</name>
+    <value/>
+    <description>Comma separated list of Kafka broker endpoints in host:port form</description>
+    <depends-on>
+     <property>
+       <type>kafka-broker</type>
+       <name>listeners</name>
+     </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.zookeeper.connect</name>
+    <value/>
+    <description>Comma separated list of servers forming Zookeeper quorum used by Kafka.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.hook.group.id</name>
+    <value>atlas</value>
+    <description>Kafka group id for the hook topic.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.zookeeper.session.timeout.ms</name>
+    <value>60000</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.zookeeper.connection.timeout.ms</name>
+    <value>30000</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.zookeeper.sync.time.ms</name>
+    <value>20</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.auto.commit.enable</name>
+    <value>false</value>
+    <description>Kafka auto commit setting for Atlas notifications.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.notification.create.topics</name>
+    <value>true</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.notification.replicas</name>
+    <value>1</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.notification.topics</name>
+    <value>ATLAS_HOOK,ATLAS_ENTITIES</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- End: Atlas Hooks -->
+
+  <property>
+    <name>atlas.authorizer.impl</name>
+    <description>
+      Atlas authorizer class
+    </description>
+    <depends-on>
+      <property>
+        <type>ranger-atlas-plugin-properties</type>
+        <name>ranger-atlas-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Lineage properties -->
+  <property>
+    <name>atlas.lineage.schema.query.hive_table</name>
+    <value>hive_table where __guid='%s'\, columns</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.lineage.schema.query.Table</name>
+    <value>Table where __guid='%s'\, columns</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.solr.kerberos.enable</name>
+    <value>false</value>
+    <description>Enable kerberized Solr support for Atlas.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+
+  <!-- LDAP properties. They all begin with "atlas.authentication.method.ldap."
+  Must allow empty values since the user can pick either LDAP or AD.
+  -->
+  <property>
+    <name>atlas.authentication.method.ldap.url</name>
+    <value/>
+    <description>The LDAP URL.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.userDNpattern</name>
+    <value>uid=</value>
+    <description>User DN Pattern. This pattern is used to create a distinguished name (DN) for a user during login.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.groupSearchBase</name>
+    <value/>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.groupSearchFilter</name>
+    <value/>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.groupRoleAttribute</name>
+    <value>cn</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.base.dn</name>
+    <value/>
+    <description>The Distinguished Name (DN) of the starting point for directory server searches.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.bind.dn</name>
+    <value/>
+    <description>Full distinguished name (DN), including common name (CN), of an LDAP user account that has privileges to search. </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.bind.password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the account that can search</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.referral</name>
+    <value>ignore</value>
+    <description>Set to follow if multiple LDAP servers are configured to return continuation references for results. Set to ignore (default) if no referrals should be followed. Possible values are follow|throw|ignore</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.user.searchfilter</name>
+    <value/>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.default.role</name>
+    <value>ROLE_USER</value>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- AD properties. They all begin with "atlas.authentication.method.ldap.ad."
+  Must allow empty values since the user can pick either LDAP or AD.
+  -->
+  <property>
+    <name>atlas.authentication.method.ldap.ad.domain</name>
+    <display-name>Domain Name (Only for AD)</display-name>
+    <value/>
+    <description>AD domain, only used if Authentication method is AD</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.url</name>
+    <value/>
+    <description>AD URL, only used if Authentication method is AD</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.base.dn</name>
+    <value/>
+    <description>The Distinguished Name (DN) of the starting point for directory server searches.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.bind.dn</name>
+    <value/>
+    <description>Full distinguished name (DN), including common name (CN), of an LDAP user account that has privileges to search. </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.bind.password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the account that can search</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.referral</name>
+    <value>ignore</value>
+    <description>Set to follow if multiple AD servers are configured to return continuation references for results. Set to ignore (default) if no referrals should be followed. Possible values are follow|throw|ignore</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.user.searchfilter</name>
+    <value>(sAMAccountName={0})</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.authentication.method.ldap.ad.default.role</name>
+    <value>ROLE_USER</value>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.ssl.exclude.protocols</name>
+    <display-name>Excluded Wire Encryption Protocols</display-name>
+    <value>TLSv1.2</value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>A comma-separated list of the wire encryption protocols to exclude when TLS is enabled. Some versions of cURL do not work with TLSv1.2.</description>
+    <used-by>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+    </used-by>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>atlas.sso.knox.enabled</name>
+    <display-name>Enable Atlas Knox SSO</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.sso.knox.providerurl</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+       <type>gateway-site</type>
+       <name>gateway.port</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.sso.knox.publicKey</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <type>multiline</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.sso.knox.browser.useragent</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.authentication.method.ldap.type</name>
+    <display-name>LDAP Authentication Type</display-name>
+    <value>ldap</value>
+    <description>The LDAP type (ldap, ad, or none).</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ldap</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>ad</value>
+          <label>AD</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
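
atlas.rest.address above declares depends-on entries for atlas.enableTLS and the two port properties, and the preceding comment notes the value is generated by Stack Advisor (which is why it is visible but not editable). A minimal sketch of that derivation, assuming only these three inputs:

    def atlas_rest_address(props, host="localhost"):
        tls = props.get("atlas.enableTLS", "false").lower() == "true"
        port = props["atlas.server.https.port" if tls
                     else "atlas.server.http.port"]
        return "%s://%s:%s" % ("https" if tls else "http", host, port)

    print(atlas_rest_address({"atlas.enableTLS": "false",
                              "atlas.server.http.port": "21000"}))
    # http://localhost:21000
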
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-env.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-env.xml
new file mode 100644
index 0000000..c5a4fd6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-env.xml
@@ -0,0 +1,182 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>atlas_server_metadata_size</name>
+    <value>50000</value>
+    <description>Count of metadata objects that the Atlas instance is expected to process</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas_server_xmx</name>
+    <value>2048</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas_server_max_new_size</name>
+    <value>614</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- metadata-env.sh -->
+  <property require-input="false">
+    <name>metadata_log_dir</name>
+    <value>/var/log/atlas</value>
+    <description>Atlas log directory.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="false">
+    <name>metadata_pid_dir</name>
+    <value>/var/run/atlas</value>
+    <description>Atlas pid-file directory.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metadata_user</name>
+    <display-name>Metadata User</display-name>
+    <value>atlas</value>
+    <property-type>USER</property-type>
+    <description>Metadata User Name.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metadata_opts</name>
+    <value>-Dlog4j.configuration=atlas-log4j.xml</value>
+    <description>Metadata Server command line options.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metadata_classpath</name>
+    <value> </value>
+    <description>Metadata Server additional classpath.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="false">
+    <name>metadata_data_dir</name>
+    <value>/var/lib/atlas/data</value>
+    <description>Atlas data directory.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="false">
+    <name>metadata_expanded_war_dir</name>
+    <value>./server/webapp</value>
+    <description>Atlas expanded WAR directory.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metadata_conf_file</name>
+    <value>atlas-application.properties</value>
+    <description>Atlas configuration file</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas_solr_shards</name>
+    <value>1</value>
+    <description>The number of shards for collections created in Ambari Infra Solr.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- metadata-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>atlas-env template</display-name>
+    <description>This is the jinja template for metadata-env.sh file</description>
+    <value>
+      # The java implementation to use. If JAVA_HOME is not found, we expect java and jar to be in the path
+      export JAVA_HOME={{java64_home}}
+
+      # any additional java opts you want to set. This will apply to both client and server operations
+      {% if security_enabled %}
+      export ATLAS_OPTS="{{metadata_opts}} -Djava.security.auth.login.config={{atlas_jaas_file}}"
+      {% else %}
+      export ATLAS_OPTS="{{metadata_opts}}"
+      {% endif %}
+
+      # metadata configuration directory
+      export ATLAS_CONF={{conf_dir}}
+
+      # Where log files are stored. Default is the logs directory under the base install location
+      export ATLAS_LOG_DIR={{log_dir}}
+
+      # additional classpath entries
+      export ATLASCPPATH={{metadata_classpath}}
+
+      # data dir
+      export ATLAS_DATA_DIR={{data_dir}}
+
+      # pid dir
+      export ATLAS_PID_DIR={{pid_dir}}
+
+      # hbase conf dir
+      export HBASE_CONF_DIR={{hbase_conf_dir}}
+
+      # Where to expand the war file. By default it is in the /server/webapp dir under the base install dir.
+      export ATLAS_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}
+      export ATLAS_SERVER_OPTS="-server -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+PrintTenuringDistribution -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$ATLAS_LOG_DIR/atlas_server.hprof -Xloggc:$ATLAS_LOG_DIR/gc-worker.log -verbose:gc -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1m -XX:+PrintGCDetails -XX:+PrintHeapAtGC -XX:+PrintGCTimeStamps"
+      {% if java_version == 8 %}
+      export ATLAS_SERVER_HEAP="-Xms{{atlas_server_xmx}}m -Xmx{{atlas_server_xmx}}m -XX:MaxNewSize={{atlas_server_max_new_size}}m -XX:MetaspaceSize=100m -XX:MaxMetaspaceSize=512m"
+      {% else %}
+      export ATLAS_SERVER_HEAP="-Xms{{atlas_server_xmx}}m -Xmx{{atlas_server_xmx}}m -XX:MaxNewSize={{atlas_server_max_new_size}}m -XX:MaxPermSize=512m"
+      {% endif %}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas.admin.username</name>
+    <display-name>Admin username</display-name>
+    <description>Admin Login user</description>
+    <value>admin</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.admin.password</name>
+    <display-name>Admin password</display-name>
+    <description>Admin Login password</description>
+    <value>admin</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
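
The content property above is a Jinja template that Ambari renders into metadata-env.sh. A hedged illustration of the java_version branch using jinja2, with made-up parameter values:

    from jinja2 import Template

    # Condensed excerpt of the heap-sizing branch from the template above.
    heap = Template(
        "{% if java_version == 8 %}"
        "-Xmx{{ atlas_server_xmx }}m -XX:MaxMetaspaceSize=512m"
        "{% else %}"
        "-Xmx{{ atlas_server_xmx }}m -XX:MaxPermSize=512m"
        "{% endif %}")

    print(heap.render(java_version=8, atlas_server_xmx=2048))
    # -Xmx2048m -XX:MaxMetaspaceSize=512m
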
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-log4j.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-log4j.xml
new file mode 100644
index 0000000..bafd47d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-log4j.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+
+  <property>
+    <name>atlas_log_level</name>
+    <value>info</value>
+    <description>Log level for atlas logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>audit_log_level</name>
+    <value>info</value>
+    <description>Log level for audit logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>atlas_log_max_backup_size</name>
+    <value>256</value>
+    <description>The maximum size of backup file before the log is rotated</description>
+    <display-name>Atlas Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+   </property>
+   <property>
+    <name>atlas_log_number_of_backup_files</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Atlas Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>atlas-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+  <appender name="console" class="org.apache.log4j.ConsoleAppender">
+    <param name="Target" value="System.out"/>
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
+    </layout>
+  </appender>
+
+  <appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
+    <param name="File" value="{{log_dir}}/application.log"/>
+    <param name="Append" value="true"/>
+    <param name="MaxFileSize" value="{{atlas_log_max_backup_size}}MB" />
+    <param name="MaxBackupIndex" value="{{atlas_log_number_of_backup_files}}" />
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
+    </layout>
+  </appender>
+
+  <!-- uncomment this block to generate performance traces
+  <appender name="perf_appender" class="org.apache.log4j.DailyRollingFileAppender">
+    <param name="File" value="{{log_dir}}/atlas_perf.log" />
+    <param name="datePattern" value="'.'yyyy-MM-dd" />
+    <param name="append" value="true" />
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d|%t|%m%n" />
+    </layout>
+  </appender>
+
+    <logger name="org.apache.atlas.perf" additivity="false">
+      <level value="debug" />
+         <appender-ref ref="perf_appender" />
+    </logger>
+  -->
+
+  <appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
+    <param name="File" value="{{log_dir}}/audit.log"/>
+    <param name="Append" value="true"/>
+    <param name="Threshold" value="info"/>
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%d %x %m%n"/>
+    </layout>
+  </appender>
+
+  <logger name="org.apache.atlas" additivity="false">
+    <level value="{{atlas_log_level}}"/>
+    <appender-ref ref="FILE"/>
+  </logger>
+
+
+  <logger name="com.thinkaurelius.titan" additivity="false">
+    <level value="info"/>
+    <appender-ref ref="FILE"/>
+  </logger>
+
+  <logger name="org.elasticsearch" additivity="false">
+    <level value="info"/>
+    <appender-ref ref="FILE"/>
+  </logger>
+
+  <logger name="org.apache.lucene" additivity="false">
+    <level value="info"/>
+    <appender-ref ref="FILE"/>
+  </logger>
+
+  <logger name="com.google" additivity="false">
+    <level value="info"/>
+    <appender-ref ref="FILE"/>
+  </logger>
+
+  <logger name="AUDIT" additivity="false">
+    <level value="{{audit_log_level}}"/>
+    <appender-ref ref="AUDIT"/>
+  </logger>
+
+  <root>
+    <priority value="info"/>
+    <appender-ref ref="FILE"/>
+  </root>
+
+</log4j:configuration>
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
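
One consistency worth noting: the ConversionPattern in the console and FILE appenders above is the same layout that the Logfeeder input definition earlier in this patch declares as Atlas's log4j_format, which is what lets Logfeeder's message_pattern parse these logs. A trivial check (an observation about this patch, not Ambari code):

    conversion_pattern = "%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"  # atlas-log4j.xml
    logfeeder_format   = "%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"  # input config
    assert conversion_pattern == logfeeder_format
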
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-solrconfig.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-solrconfig.xml
new file mode 100644
index 0000000..cba4a4e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/atlas-solrconfig.xml
@@ -0,0 +1,641 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>atlas-solrconfig template</display-name>
+    <description>Atlas Solr configuration</description>
+    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     ***
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>5.5.1</luceneMatchVersion>
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based, not
+       persistent, and doesn't work with replication.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}">
+  </directoryFactory>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- To disable dynamic schema REST APIs, use the following for <schemaFactory>:
+
+       <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+       When ManagedIndexSchemaFactory is specified instead, Solr will load the schema from
+       the resource named in 'managedSchemaResourceName', rather than from schema.xml.
+       Note that the managed schema resource CANNOT be named schema.xml.  If the managed
+       schema does not exist, Solr will create it after reading schema.xml, then rename
+       'schema.xml' to 'schema.xml.bak'.
+
+       Do NOT hand edit the managed schema - external modifications will be ignored and
+       overwritten as a result of schema modification REST API calls.
+
+       When ManagedIndexSchemaFactory is specified with mutable = true, schema
+       modification REST API calls will be allowed; otherwise, error responses will be
+       sent back for these requests.
+  -->
+  <schemaFactory class="ManagedIndexSchemaFactory">
+    <bool name="mutable">true</bool>
+    <str name="managedSchemaResourceName">managed-schema</str>
+  </schemaFactory>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is the default for Solr 3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+    <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         solr cloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+    <autoCommit>
+      <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+      <openSearcher>false</openSearcher>
+    </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+    <autoSoftCommit>
+      <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+    </autoSoftCommit>
+
+  </updateHandler>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+    <!-- Max Boolean Clauses
+
+         Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.
+
+         ** WARNING **
+
+         This option actually modifies a global Lucene property that
+         will affect all SolrCores.  If multiple solrconfig.xml files
+         disagree on this property, the value at any given moment will
+         be based on the last SolrCore to be initialized.
+
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional supported parameter by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                      size="512"
+                      initialSize="512"
+                      autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+           class="solr.search.LRUCache"
+           size="10"
+           initialSize="0"
+           autowarmCount="10"
+           regenerator="solr.NoOpRegenerator" />
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+    <!-- Result Window Size
+
+         An optimization for use with the queryResultCache.  When a search
+         is requested, a superset of the requested number of document ids
+         are collected.  For example, if a search for a particular query
+         requests matching documents 10 through 19, and queryResultWindowSize is 50,
+         then documents 0 through 49 will be collected and cached.  Any further
+         requests in that range can be satisfied via the cache.
+      -->
+    <queryResultWindowSize>20</queryResultWindowSize>
+
+    <!-- Maximum number of documents to cache for any entry in the
+         queryResultCache.
+      -->
+    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+    <!-- Max Warming Searchers
+
+         Maximum number of searchers that may be warming in the
+         background concurrently.  An error is returned if this limit
+         is exceeded.
+
+         Recommend values of 1-2 for read-only slaves, higher for
+         masters w/o cache warming.
+      -->
+    <maxWarmingSearchers>2</maxWarmingSearchers>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+       handleSelect is a legacy option that affects the behavior of requests
+       such as /select?qt=XXX
+
+       handleSelect="true" will cause the SolrDispatchFilter to process
+       the request and dispatch the query to a handler specified by the
+       "qt" param, assuming "/select" isn't already registered.
+
+       handleSelect="false" will cause the SolrDispatchFilter to
+       ignore "/select" requests, resulting in a 404 unless a handler
+       is explicitly registered with the name "/select"
+
+       handleSelect="true" is not recommended for new users, but is the default
+       for backwards compatibility
+    -->
+  <requestDispatcher handleSelect="false" >
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         The settings below authorize Solr to fetch remote files; you
+         should make sure your system has some authentication before
+         using enableRemoteStreaming="true".
+
+      -->
+    <requestParsers enableRemoteStreaming="true"
+                    multipartUploadLimitInKB="2048000"
+                    formdataUploadLimitInKB="2048"
+                    addHttpRequestToContext="false"/>
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       Legacy behavior: If the request path uses "/select" but no Request
+       Handler has that name, and if handleSelect="true" has been specified in
+       the requestDispatcher, then the Request Handler is dispatched based on
+       the qt parameter.  Handlers without a leading '/' are accessed this way
+       like so: http://host/app/[core/]select?qt=name  If no qt is
+       given, then the requestHandler that declares default="true" will be
+       used or the one named "standard".
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler". It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <int name="rows">10</int>
+    </lst>
+
+  </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="wt">json</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+  </requestHandler>
+
+  <!--
+    The export request handler is used to export full sorted result sets.
+    Do not change these defaults.
+  -->
+  <requestHandler name="/export" class="solr.SearchHandler">
+    <lst name="invariants">
+      <str name="rq">{!xport}</str>
+      <str name="wt">xsort</str>
+      <str name="distrib">false</str>
+    </lst>
+
+    <arr name="components">
+      <str>query</str>
+    </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- Field Analysis Request Handler
+
+       RequestHandler that provides much the same functionality as
+       analysis.jsp. Provides the ability to specify multiple field
+       types and field names in the same request and outputs
+       index-time and query-time analysis for each of them.
+
+       Request parameters are:
+       analysis.fieldname - field name whose analyzers are to be used
+
+       analysis.fieldtype - field type whose analyzers are to be used
+       analysis.fieldvalue - text for index-time analysis
+       q (or analysis.q) - text for query time analysis
+       analysis.showmatch (true|false) - When set to true and when
+           query analysis is performed, the produced tokens of the
+           field value analysis will be marked as "matched" for every
+           token that is produced by the query analysis
+   -->
+  <requestHandler name="/analysis/field"
+                  startup="lazy"
+                  class="solr.FieldAnalysisRequestHandler" />
+
+
+  <!-- Document Analysis Handler
+
+       http://wiki.apache.org/solr/AnalysisRequestHandler
+
+       An analysis handler that provides a breakdown of the analysis
+       process of provided documents. This handler expects a (single)
+       content stream with the following format:
+
+       <docs>
+         <doc>
+           <field name="id">1</field>
+           <field name="name">The Name</field>
+           <field name="text">The Text Value</field>
+         </doc>
+         <doc>...</doc>
+         <doc>...</doc>
+         ...
+       </docs>
+
+    Note: Each document must contain a field which serves as the
+    unique key. This key is used in the returned response to associate
+    an analysis breakdown to the analyzed document.
+
+    Like the FieldAnalysisRequestHandler, this handler also supports
+    query analysis by sending either an "analysis.query" or "q"
+    request parameter that holds the query text to be analyzed. It
+    also supports the "analysis.showmatch" parameter which, when set
+    to true, causes all field tokens that match the query tokens to be
+    marked as a "match".
+  -->
+  <requestHandler name="/analysis/document"
+                  class="solr.DocumentAnalysisRequestHandler"
+                  startup="lazy" />
+
+  <!-- Echo the request contents back to the client -->
+  <requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="echoHandler">true</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+     -->
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Legacy config for the admin interface -->
+  <admin>
+    <defaultQuery>*:*</defaultQuery>
+  </admin>
+
+</config>
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
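
Review note: the handlers above differ in how their <lst> sections bind parameters: "defaults" may be overridden per request, while "invariants" (as on /export) may not. A minimal Python sketch of exercising that difference, assuming the requests library, a local Solr at localhost:8983, and a hypothetical core named vertex_index:

    import requests  # assumed available

    SOLR = "http://localhost:8983/solr/vertex_index"  # hypothetical host and core

    # /select declares rows=10 under "defaults", so a request parameter wins:
    resp = requests.get(SOLR + "/select",
                        params={"q": "*:*", "rows": 2, "wt": "json"})
    print(len(resp.json()["response"]["docs"]))  # at most 2, not the default 10

    # /export pins wt/distrib/rq under "invariants"; a wt=json parameter
    # sent to it would be silently ignored by the handler.
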
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-audit.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-audit.xml
new file mode 100644
index 0000000..16c022d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-audit.xml
@@ -0,0 +1,141 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/atlas/audit/hdfs/spool</value>
+    <description>Spool directory for HDFS audit batch files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value></value>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/atlas/audit/solr/spool</value>
+    <description>Spool directory for Solr audit batch files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+   <property>
+    <name>ranger.plugin.atlas.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster where the Ranger Atlas plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
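
Review note: each of these new configuration files follows the same Ambari <property> shape (name, value, optional value-attributes and depends-on blocks). A minimal sketch of flattening such a file into a name-to-value dict, assuming a local copy of the file; this mirrors the shape only, not Ambari's actual loader:

    import xml.etree.ElementTree as ET

    def load_ambari_config(path):
        """Flatten an Ambari <configuration> file into a {name: value} dict."""
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            props[prop.findtext("name")] = prop.findtext("value") or ""
        return props

    conf = load_ambari_config("ranger-atlas-audit.xml")  # hypothetical local path
    assert conf["xasecure.audit.is.enabled"] == "true"
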
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-plugin-properties.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-plugin-properties.xml
new file mode 100644
index 0000000..d66afa1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-plugin-properties.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+  <property>
+    <name>policy_user</name>
+    <value>atlas</value>
+    <display-name>Policy user for Atlas</display-name>
+    <description>This user must be a system user and must also be present
+      in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>common.name.for.certificate</name>
+    <value></value>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger-atlas-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for Atlas</display-name>
+    <description>Enable the Ranger Atlas plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-atlas-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>admin</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>admin</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
\ No newline at end of file
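
Review note: ranger-atlas-plugin-enabled is stored as the string "Yes"/"No" (the boolean value-attribute only drives the UI widget), so stack scripts have to normalize it on read. A sketch of the usual pattern, with a hypothetical stand-in for the parsed command configuration:

    # Hypothetical stand-in for the command JSON handed to the stack scripts.
    config = {"configurations": {"ranger-atlas-plugin-properties":
                                 {"ranger-atlas-plugin-enabled": "Yes"}}}

    flag = config["configurations"]["ranger-atlas-plugin-properties"]["ranger-atlas-plugin-enabled"]
    enable_ranger_atlas = flag.lower() == "yes"  # "Yes"/"No" string, not a boolean
    print(enable_ranger_atlas)  # True
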
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-policymgr-ssl.xml
new file mode 100644
index 0000000..dcffb63
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-policymgr-ssl.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/atlas-server/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/atlas-server/conf/ranger-plugin-truststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
\ No newline at end of file
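
Review note: the jceks://file{{credential_file}} values only form a valid Hadoop credential-provider URI if the templated credential_file resolves to an absolute path. A short sanity sketch with a hypothetical resolved value:

    credential_file = "/etc/ranger/atlas/cred.jceks"  # hypothetical resolved value
    uri = "jceks://file" + credential_file            # jceks://file/etc/ranger/atlas/cred.jceks
    assert uri.startswith("jceks://file/")
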
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-security.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-security.xml
new file mode 100644
index 0000000..8fac342
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/configuration/ranger-atlas-security.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.atlas.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing Atlas policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.atlas.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.atlas.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>ranger.plugin.atlas.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/atlas-server/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.atlas.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for policy changes, in milliseconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.atlas.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.add-hadoop-authorization</name>
+    <value>true</value>
+    <description>Enable/disable the default Hadoop authorization (based on the rwxrwxrwx permissions on the resource) if Ranger authorization fails.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
\ No newline at end of file
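
Review note: the {{...}} placeholders in this file ({{repo_name}}, {{policymgr_mgr_url}}, and {{cluster_name}} above) are filled in from the stack's params before the rendered file reaches the node. A minimal sketch of that substitution with hypothetical values:

    def render(template, values):
        """Naive {{key}} substitution, standing in for the real templating."""
        for key, val in values.items():
            template = template.replace("{{" + key + "}}", val)
        return template

    print(render("/etc/ranger/{{repo_name}}/policycache",
                 {"repo_name": "cl1_atlas"}))  # hypothetical repo name
    # -> /etc/ranger/cl1_atlas/policycache
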
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/kerberos.json
new file mode 100644
index 0000000..7d10ccc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/kerberos.json
@@ -0,0 +1,100 @@
+{
+  "services": [
+    {
+      "name": "ATLAS",
+      "configurations": [
+        {
+          "application-properties": {
+            "atlas.authentication.method.kerberos": "true",
+            "atlas.kafka.sasl.kerberos.service.name": "${kafka-env/kafka_user}",
+            "atlas.kafka.security.protocol": "PLAINTEXTSASL",
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+            "atlas.jaas.KafkaClient.option.storeKey": "true",
+            "atlas.jaas.KafkaClient.option.serviceName": "${kafka-env/kafka_user}",
+            "atlas.solr.kerberos.enable": "true",
+            "atlas.server.ha.zookeeper.acl" : "auth:"
+          }
+        },
+        {
+          "ranger-atlas-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "auth_to_local_properties" : [
+        "application-properties/atlas.authentication.method.kerberos.name.rules|new_lines_escaped"
+      ],
+      "components": [
+        {
+          "name": "ATLAS_SERVER",
+          "identities": [
+            {
+              "name": "atlas",
+              "principal": {
+                "value": "atlas/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "application-properties/atlas.jaas.KafkaClient.option.principal",
+                "local_username" : "${atlas-env/metadata_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/atlas.service.keytab",
+                "owner": {
+                  "name": "${atlas-env/metadata_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "application-properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "atlas_auth",
+              "reference": "/ATLAS/ATLAS_SERVER/atlas",
+              "principal": {
+                "configuration": "application-properties/atlas.authentication.principal"
+              },
+              "keytab": {
+                "configuration": "application-properties/atlas.authentication.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "value": "HTTP/_HOST@${realm}",
+                "configuration": "application-properties/atlas.authentication.method.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "application-properties/atlas.authentication.method.kerberos.keytab"
+              }
+            },
+            {
+              "name": "ranger_atlas_audit",
+              "reference": "/ATLAS/ATLAS_SERVER/atlas",
+              "principal": {
+                "configuration": "ranger-atlas-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-atlas-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            },
+            {
+              "name": "/KAFKA/KAFKA_BROKER/kafka_broker"
+            },
+            {
+              "name": "/AMBARI_INFRA/INFRA_SOLR/infra-solr"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
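
Review note: the kerberos descriptor mixes two substitution mechanisms: ${config-type/property} (and ${realm}) references resolved against cluster configuration, and the _HOST placeholder resolved per host when keytabs are generated. A sketch of both steps with hypothetical inputs:

    import re

    lookups = {"kafka-env/kafka_user": "kafka", "realm": "EXAMPLE.COM"}  # hypothetical

    def resolve(value, hostname):
        value = re.sub(r"\$\{([^}]+)\}", lambda m: lookups[m.group(1)], value)
        return value.replace("_HOST", hostname)

    print(resolve("atlas/_HOST@${realm}", "c6401.ambari.apache.org"))
    # -> atlas/c6401.ambari.apache.org@EXAMPLE.COM
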
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml
new file mode 100644
index 0000000..11ebf45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/metainfo.xml
@@ -0,0 +1,190 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ATLAS</name>
+      <displayName>Atlas</displayName>
+      <comment>Atlas Metadata and Governance platform</comment>
+      <version>0.7.0.3.0</version>
+
+      <components>
+        <component>
+          <name>ATLAS_SERVER</name>
+          <displayName>Atlas Metadata Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>AMBARI_INFRA/INFRA_SOLR_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HBASE/HBASE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>KAFKA/KAFKA_BROKER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/metadata_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>atlas_app</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>ATLAS_CLIENT</name>
+          <displayName>Atlas Metadata Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+          </dependencies>
+          <commandScript>
+            <script>scripts/atlas_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>properties</type>
+              <fileName>application.properties</fileName>
+              <dictionaryName>application-properties</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-env.sh</fileName>
+              <dictionaryName>atlas-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-log4j.xml</fileName>
+              <dictionaryName>atlas-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>atlas-solrconfig.xml</fileName>
+              <dictionaryName>atlas-solrconfig</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <requiredServices>
+        <service>KAFKA</service>
+      </requiredServices>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_2.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>atlas-metadata_${stack_version}</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+            <package>
+              <name>kafka_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>atlas-metadata-${stack_version}</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+            <package>
+              <name>kafka-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>application-properties</config-type>
+        <config-type>atlas-env</config-type>
+        <config-type>atlas-log4j</config-type>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>atlas-solrconfig</config-type>
+        <config-type>ranger-atlas-audit</config-type>
+        <config-type>ranger-atlas-plugin-properties</config-type>
+        <config-type>ranger-atlas-policymgr-ssl</config-type>
+        <config-type>ranger-atlas-security</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>
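
Review note: the two osSpecific blocks encode the rpm vs. deb packaging split, joining the stack version token with '_' on redhat/suse/amazon and '-' on debian/ubuntu. A sketch of how such a package name resolves, with a hypothetical build identifier:

    def resolve_package(name_template, stack_version):
        """Substitute the ${stack_version} token in a metainfo package name."""
        return name_template.replace("${stack_version}", stack_version)

    print(resolve_package("atlas-metadata_${stack_version}", "3_0_0_0_123"))
    # -> atlas-metadata_3_0_0_0_123 (hypothetical version string)
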
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py
new file mode 100644
index 0000000..26742ae
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/atlas_client.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+from metadata import metadata
+
+
+class AtlasClient(Script):
+
+  def get_component_name(self):
+    return "atlas-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+      conf_select.select(params.stack_name, "atlas", params.version)
+      stack_select.select("atlas-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    metadata('client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  AtlasClient().execute()
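
Review note: AtlasClient().execute() is the standard Ambari entry point; the base Script maps the command name the agent passes on the command line to the matching method (install, configure, status, ...). A toy sketch of that dispatch, not the real resource_management implementation:

    import sys

    class ToyScript(object):
        """Illustration only; Ambari's Script also parses configs, sets up logging, etc."""
        def execute(self):
            command = sys.argv[1].lower()  # e.g. "INSTALL" from the agent
            getattr(self, command)()       # dispatches to self.install()

        def install(self):
            print("installing")

    if __name__ == "__main__":
        ToyScript().execute()  # invoked as: python toy.py INSTALL
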
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py
new file mode 100644
index 0000000..36c4598
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import hashlib
+
+from resource_management import Package
+from resource_management import StackFeature
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions import solr_cloud_util
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
+
+
+def metadata(type='server'):
+    import params
+
+    # Needed by both Server and Client
+    Directory(params.conf_dir,
+              mode=0755,
+              cd_access='a',
+              owner=params.metadata_user,
+              group=params.user_group,
+              create_parents = True
+    )
+
+    if type == "server":
+      Directory([params.pid_dir],
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(format('{conf_dir}/solr'),
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True,
+                recursive_ownership=True
+      )
+      Directory(params.log_dir,
+                mode=0755,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(params.data_dir,
+                mode=0644,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      Directory(params.expanded_war_dir,
+                mode=0644,
+                cd_access='a',
+                owner=params.metadata_user,
+                group=params.user_group,
+                create_parents = True
+      )
+      File(format("{expanded_war_dir}/atlas.war"),
+           content = StaticFile(format('{metadata_home}/server/webapp/atlas.war'))
+      )
+      File(format("{conf_dir}/atlas-log4j.xml"),
+           mode=0644,
+           owner=params.metadata_user,
+           group=params.user_group,
+           content=InlineTemplate(params.metadata_log4j_content)
+      )
+      File(format("{conf_dir}/atlas-env.sh"),
+           owner=params.metadata_user,
+           group=params.user_group,
+           mode=0755,
+           content=InlineTemplate(params.metadata_env_content)
+      )
+
+      if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
+        psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
+        ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
+            properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
+            owner = params.metadata_user
+        )
+
+      files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
+      for file in files_to_chown:
+        if os.path.exists(file):
+          Execute(('chown', format('{metadata_user}:{user_group}'), file),
+                  sudo=True
+                  )
+          Execute(('chmod', '644', file),
+                  sudo=True
+                  )
+
+      if params.metadata_solrconfig_content:
+        File(format("{conf_dir}/solr/solrconfig.xml"),
+             mode=0644,
+             owner=params.metadata_user,
+             group=params.user_group,
+             content=InlineTemplate(params.metadata_solrconfig_content)
+        )
+
+    # Needed by both Server and Client
+    PropertiesFile(format('{conf_dir}/{conf_file}'),
+         properties = params.application_properties,
+         mode=0644,
+         owner=params.metadata_user,
+         group=params.user_group
+    )
+
+    if params.security_enabled:
+      TemplateConfig(format(params.atlas_jaas_file),
+                     owner=params.metadata_user)
+
+    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
+      solr_cloud_util.setup_solr_client(params.config)
+      check_znode()
+      jaasFile=params.atlas_jaas_file if params.security_enabled else None
+      upload_conf_set('atlas_configs', jaasFile)
+
+      if params.security_enabled: # update permissions before creating the collections
+        solr_cloud_util.add_solr_roles(params.config,
+                                       roles = [params.infra_solr_role_atlas, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                       new_service_principals = [params.atlas_jaas_principal])
+
+      create_collection('vertex_index', 'atlas_configs', jaasFile)
+      create_collection('edge_index', 'atlas_configs', jaasFile)
+      create_collection('fulltext_index', 'atlas_configs', jaasFile)
+
+      if params.security_enabled:
+        secure_znode(format('{infra_solr_znode}/configs/atlas_configs'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/vertex_index'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/edge_index'), jaasFile)
+        secure_znode(format('{infra_solr_znode}/collections/fulltext_index'), jaasFile)
+
+    File(params.atlas_hbase_setup,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=Template("atlas_hbase_setup.rb.j2")
+    )
+
+    is_atlas_upgrade_support = check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config))
+
+    if is_atlas_upgrade_support and params.security_enabled:
+
+      File(params.atlas_kafka_setup,
+           group=params.user_group,
+           owner=params.kafka_user,
+           content=Template("atlas_kafka_acl.sh.j2"))
+
+      # these files are required only when the Kafka broker is not present on this host as a configured component
+      if not params.host_with_kafka:
+        File(format("{kafka_conf_dir}/kafka-env.sh"),
+             owner=params.kafka_user,
+             content=InlineTemplate(params.kafka_env_sh_template))
+
+        File(format("{kafka_conf_dir}/kafka_jaas.conf"),
+             group=params.user_group,
+             owner=params.kafka_user,
+             content=Template("kafka_jaas.conf.j2"))
+
+    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(params.namenode_host) > 1:
+      XmlConfig("hdfs-site.xml",
+                conf_dir=params.conf_dir,
+                configurations=params.config['configurations']['hdfs-site'],
+                configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+                owner=params.metadata_user,
+                group=params.user_group,
+                mode=0644
+                )
+    else:
+      File(format('{conf_dir}/hdfs-site.xml'), action="delete")
+
+
+def upload_conf_set(config_set, jaasFile):
+  import params
+
+  solr_cloud_util.upload_configuration_to_zk(
+      zookeeper_quorum=params.zookeeper_quorum,
+      solr_znode=params.infra_solr_znode,
+      config_set_dir=format("{conf_dir}/solr"),
+      config_set=config_set,
+      tmp_dir=params.tmp_dir,
+      java64_home=params.java64_home,
+      solrconfig_content=InlineTemplate(params.metadata_solrconfig_content),
+      jaas_file=jaasFile,
+      retry=30, interval=5)
+
+def create_collection(collection, config_set, jaasFile):
+  import params
+
+  solr_cloud_util.create_collection(
+      zookeeper_quorum=params.zookeeper_quorum,
+      solr_znode=params.infra_solr_znode,
+      collection = collection,
+      config_set=config_set,
+      java64_home=params.java64_home,
+      jaas_file=jaasFile,
+      shards=params.atlas_solr_shards,
+      replication_factor = params.infra_solr_replication_factor)
+
+def secure_znode(znode, jaasFile):
+  import params
+  solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
+                               solr_znode=znode,
+                               jaas_file=jaasFile,
+                               java64_home=params.java64_home, sasl_users=[params.atlas_jaas_principal])
+
+
+
+@retry(times=10, sleep_time=5, err_class=Fail)
+def check_znode():
+  import params
+  solr_cloud_util.check_znode(
+    zookeeper_quorum=params.zookeeper_quorum,
+    solr_znode=params.infra_solr_znode,
+    java64_home=params.java64_home)
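
Review note: the admin credential written to users-credentials.properties above is an unsalted SHA-256 of the password, stored as <user>=ROLE_ADMIN::<sha256-hex>. A sketch of producing one such entry with hypothetical credentials; the script itself relies on Python 2, where hashlib accepts a str directly, while the .encode() below keeps the sketch portable:

    import hashlib

    atlas_admin_username = "admin"     # hypothetical
    atlas_admin_password = "admin123"  # hypothetical

    digest = hashlib.sha256(atlas_admin_password.encode("utf-8")).hexdigest()
    print("%s=ROLE_ADMIN::%s" % (atlas_admin_username, digest))
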
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py
new file mode 100644
index 0000000..1ef77cf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/metadata_server.py
@@ -0,0 +1,187 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from metadata import metadata
+from resource_management import Fail
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.core.resources.system import Execute, File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_PROPERTIES
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.resources.system import Directory
+from resource_management.core.logger import Logger
+from setup_ranger_atlas import setup_ranger_atlas
+from resource_management.core.resources.zkmigrator import ZkMigrator
+
+class MetadataServer(Script):
+
+  def get_component_name(self):
+    return "atlas-server"
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    Directory(format("{expanded_war_dir}/atlas"),
+              action = "delete",
+    )
+
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    metadata()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, params.version):
+      conf_select.select(params.stack_name, "atlas", params.version)
+      stack_select.select("atlas-server", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+
+    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}')
+    no_op_test = format('ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1')
+    atlas_hbase_setup_command = format("cat {atlas_hbase_setup} | hbase shell -n")
+    atlas_kafka_setup_command = format("bash {atlas_kafka_setup}")
+    secure_atlas_hbase_setup_command = format("kinit -kt {hbase_user_keytab} {hbase_principal_name}; ") + atlas_hbase_setup_command
+    # if the principal was distributed across several hosts, the _HOST pattern needs to be replaced with the actual hostname
+    secure_atlas_kafka_setup_command = format("kinit -kt {kafka_keytab} {kafka_principal_name}; ").replace("_HOST", params.hostname) + atlas_kafka_setup_command
+
+    if params.stack_supports_atlas_ranger_plugin:
+      Logger.info('Atlas plugin is enabled, configuring Atlas plugin.')
+      setup_ranger_atlas(upgrade_type=upgrade_type)
+    else:
+      Logger.info('Atlas plugin is not supported or enabled.')
+
+    try:
+      effective_version = get_stack_feature_version(params.config)
+
+      if check_stack_feature(StackFeature.ATLAS_HBASE_SETUP, effective_version):
+        if params.security_enabled and params.has_hbase_master:
+          Execute(secure_atlas_hbase_setup_command,
+                  tries = 5,
+                  try_sleep = 10,
+                  user=params.hbase_user
+          )
+        elif params.enable_ranger_hbase and not params.security_enabled:
+          Execute(atlas_hbase_setup_command,
+                  tries = 5,
+                  try_sleep = 10,
+                  user=params.hbase_user
+          )
+
+      if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, effective_version) and params.security_enabled:
+        try:
+          Execute(secure_atlas_kafka_setup_command,
+                  user=params.kafka_user,
+                  tries=5,
+                  try_sleep=10
+          )
+        except Fail:
+          pass  # do nothing and do not block Atlas start; failure logs are available via Execute internals
+
+      Execute(daemon_cmd,
+              user=params.metadata_user,
+              not_if=no_op_test
+      )
+    except:
+      show_logs(params.log_dir, params.metadata_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh; {params.metadata_stop_script}')
+
+    # If the pid dir doesn't exist, this means either
+    # 1. The user just added Atlas service and issued a restart command (stop+start). So stop should be a no-op
+    # since there's nothing to stop.
+    # OR
+    # 2. The user changed the value of the pid dir config and incorrectly issued a restart command.
+    # In that case the stop command cannot do anything, since Ambari doesn't know which process to
+    # kill, and the start command will spawn another instance.
+    # The user should have issued a stop, changed the config, and then started it.
+    if not os.path.isdir(params.pid_dir):
+      Logger.info("*******************************************************************")
+      Logger.info("Will skip the stop command since this is the first time stopping/restarting Atlas "
+                  "and the pid dir does not exist, %s\n" % params.pid_dir)
+      return
+
+    try:
+      Execute(daemon_cmd,
+              user=params.metadata_user,
+      )
+    except:
+      show_logs(params.log_dir, params.metadata_user)
+      raise
+
+    File(params.pid_file, action="delete")
+
+  def disable_security(self, env):
+    import params
+    if not params.zookeeper_quorum:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    zkmigrator = ZkMigrator(params.zookeeper_quorum, params.java_exec, params.java64_home, params.atlas_jaas_file, params.metadata_user)
+    zkmigrator.set_acls(params.zk_root if params.zk_root.startswith('/') else '/' + params.zk_root, 'world:anyone:crdwa')
+    if params.atlas_kafka_group_id:
+      zkmigrator.set_acls(format('/consumers/{params.atlas_kafka_group_id}'), 'world:anyone:crdwa')
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+  def get_log_folder(self):
+    import params
+
+    return params.log_dir
+
+  def get_user(self):
+    import params
+
+    return params.metadata_user
+
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_file]
+
+if __name__ == "__main__":
+  MetadataServer().execute()
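
Review note: the no_op_test guard in start() ('ls {pid_file} && ps -p $(cat {pid_file})') is what makes start idempotent; the daemon command is skipped when a live pid is found. A Python equivalent of the same check, for reference:

    import os

    def atlas_is_running(pid_file):
        """True if pid_file exists and names a live process (mirrors no_op_test)."""
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 checks existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False
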
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..d26df33
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/params.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import sys
+
+from ambari_commons import OSCheck
+from resource_management import get_bare_principal
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+
+# Local Imports
+from status_params import *
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
+
+
+def configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol):
+  """
+  Return a dictionary of additional configs to merge if Atlas HA is enabled.
+  :param atlas_hosts: List of hostnames that contain Atlas
+  :param metadata_port: Port number
+  :param is_atlas_ha_enabled: None, True, or False
+  :param metadata_protocol: http or https
+  :return: Dictionary with additional configs to merge to application-properties if HA is enabled.
+  """
+  additional_props = {}
+  if atlas_hosts is None or len(atlas_hosts) == 0 or metadata_port is None:
+    return additional_props
+
+  # Sort to guarantee each host sees the same values, assuming restarted at the same time.
+  atlas_hosts = sorted(atlas_hosts)
+
+  # E.g., id1,id2,id3,...,idn
+  _server_id_list = ["id" + str(i) for i in range(1, len(atlas_hosts) + 1)]
+  atlas_server_ids = ",".join(_server_id_list)
+  additional_props["atlas.server.ids"] = atlas_server_ids
+
+  i = 0
+  for curr_hostname in atlas_hosts:
+    id = _server_id_list[i]
+    prop_name = "atlas.server.address." + id
+    prop_value = curr_hostname + ":" + metadata_port
+    additional_props[prop_name] = prop_value
+    if "atlas.rest.address" in additional_props:
+      additional_props["atlas.rest.address"] += "," + metadata_protocol + "://" + prop_value
+    else:
+      additional_props["atlas.rest.address"] = metadata_protocol + "://" + prop_value
+
+    i += 1
+
+  # This may override the existing property
+  if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
+    additional_props["atlas.server.ha.enabled"] = "false"
+  elif i > 1:
+    additional_props["atlas.server.ha.enabled"] = "true"
+
+  return additional_props
+  
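+# Worked example (hypothetical two-host cluster):
+#   configs_for_ha(['h2.example.com', 'h1.example.com'], '21000', None, 'http')
+#   returns:
+#   {'atlas.server.ids': 'id1,id2',
+#    'atlas.server.address.id1': 'h1.example.com:21000',
+#    'atlas.server.address.id2': 'h2.example.com:21000',
+#    'atlas.rest.address': 'http://h1.example.com:21000,http://h2.example.com:21000',
+#    'atlas.server.ha.enabled': 'true'}
+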
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+java_version = expect("/hostLevelParams/java_version", int)
+
+zk_root = default('/configurations/application-properties/atlas.server.ha.zookeeper.zkroot', '/apache_atlas')
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+atlas_kafka_group_id = default('/configurations/application-properties/atlas.kafka.hook.group.id', None)
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
+  atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
+  atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)
+
+# stack version
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+metadata_home = format('{stack_root}/current/atlas-server')
+metadata_bin = format("{metadata_home}/bin")
+
+python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
+metadata_start_script = format("{metadata_bin}/atlas_start.py")
+metadata_stop_script = format("{metadata_bin}/atlas_stop.py")
+
+# metadata local directory structure
+log_dir = config['configurations']['atlas-env']['metadata_log_dir']
+
+# service locations
+hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
+
+# some commands may need to supply the JAAS location when running as atlas
+atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
+
+# user
+user_group = config['configurations']['cluster-env']['user_group']
+
+# metadata env
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+env_sh_template = config['configurations']['atlas-env']['content']
+
+# credential provider
+credential_provider = format("jceks://file@{conf_dir}/atlas-site.jceks")
+
+# command line args
+ssl_enabled = default("/configurations/application-properties/atlas.enableTLS", False)
+http_port = default("/configurations/application-properties/atlas.server.http.port", "21000")
+https_port = default("/configurations/application-properties/atlas.server.https.port", "21443")
+if ssl_enabled:
+  metadata_port = https_port
+  metadata_protocol = 'https'
+else:
+  metadata_port = http_port
+  metadata_protocol = 'http'
+
+metadata_host = config['hostname']
+
+atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
+metadata_server_host = atlas_hosts[0] if len(atlas_hosts) > 0 else "UNKNOWN_HOST"
+
+# application properties
+application_properties = dict(config['configurations']['application-properties'])
+application_properties["atlas.server.bind.address"] = metadata_host
+
+# Strip newline characters from the Knox SSO public key
+if 'atlas.sso.knox.publicKey' in application_properties:
+  knox_key = application_properties['atlas.sso.knox.publicKey']
+  knox_key_without_new_line = knox_key.replace("\n","")
+  application_properties['atlas.sso.knox.publicKey'] = knox_key_without_new_line
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  metadata_server_url = application_properties["atlas.rest.address"]
+else:
+  # In HDP 2.3 and 2.4 the property was computed and saved to the local config but did not exist in the database.
+  metadata_server_url = format('{metadata_protocol}://{metadata_server_host}:{metadata_port}')
+  application_properties["atlas.rest.address"] = metadata_server_url
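+  # e.g. "http://atlas1.example.com:21000" (host name illustrative)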
+
+# Atlas HA should populate
+# atlas.server.ids = id1,id2,...,idn
+# atlas.server.address.id# = host#:port
+# Users should not have to modify atlas.server.ha.enabled, but overriding it to False is still allowed when multiple Atlas servers exist.
+# Its value can be None, True, or False.
+is_atlas_ha_enabled = default("/configurations/application-properties/atlas.server.ha.enabled", None)
+additional_ha_props = configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol)
+for k,v in additional_ha_props.iteritems():
+  application_properties[k] = v
+
+
+metadata_env_content = config['configurations']['atlas-env']['content']
+
+metadata_opts = config['configurations']['atlas-env']['metadata_opts']
+metadata_classpath = config['configurations']['atlas-env']['metadata_classpath']
+data_dir = format("{stack_root}/current/atlas-server/data")
+expanded_war_dir = os.environ['METADATA_EXPANDED_WEBAPP_DIR'] if 'METADATA_EXPANDED_WEBAPP_DIR' in os.environ else format("{stack_root}/current/atlas-server/server/webapp")
+
+metadata_log4j_content = config['configurations']['atlas-log4j']['content']
+
+metadata_solrconfig_content = default("/configurations/atlas-solrconfig/content", None)
+
+atlas_log_level = config['configurations']['atlas-log4j']['atlas_log_level']
+audit_log_level = config['configurations']['atlas-log4j']['audit_log_level']
+atlas_log_max_backup_size = default("/configurations/atlas-log4j/atlas_log_max_backup_size", 256)
+atlas_log_number_of_backup_files = default("/configurations/atlas-log4j/atlas_log_number_of_backup_files", 20)
+
+# smoke test
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smoke_test_password = 'smoke'
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+
+security_check_status_file = format('{log_dir}/security_check.status')
+
+# hbase
+hbase_conf_dir = "/etc/hbase/conf"
+
+atlas_search_backend = default("/configurations/application-properties/atlas.graph.index.search.backend", "")
+search_backend_solr = atlas_search_backend.startswith('solr')
+
+# infra solr
+infra_solr_znode = default("/configurations/infra-solr-env/infra_solr_znode", None)
+infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
+infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
+atlas_solr_shards = default("/configurations/atlas-env/atlas_solr-shards", 1)
+has_infra_solr = len(infra_solr_hosts) > 0
+infra_solr_role_atlas = default('/configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
+infra_solr_role_dev = default('/configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
+infra_solr_role_ranger_audit = default('/configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
+
+# zookeeper
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+
+# Build a comma-separated zookeeper quorum string from clusterHostInfo
+if zookeeper_port is not None:
+  zookeeper_quorum = ",".join(host + ":" + str(zookeeper_port) for host in zookeeper_hosts)
+else:
+  zookeeper_quorum = ",".join(zookeeper_hosts)
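+# e.g. "zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181" (hosts illustrative)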
+
+stack_supports_atlas_hdfs_site_on_namenode_ha = check_stack_feature(StackFeature.ATLAS_HDFS_SITE_ON_NAMENODE_HA, version_for_stack_feature_checks)
+
+atlas_server_xmx = default("/configurations/atlas-env/atlas_server_xmx", 2048)
+atlas_server_max_new_size = default("/configurations/atlas-env/atlas_server_max_new_size", 614)
+
+hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
+has_hbase_master = not len(hbase_master_hosts) == 0
+
+atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
+atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
+hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
+hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
+
+# ToDo: Kafka port to Atlas
+# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
+hosts_with_kafka = default('/clusterHostInfo/kafka_broker_hosts', [])
+host_with_kafka = hostname in hosts_with_kafka
+
+ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
+has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
+rangertagsync_user = "rangertagsync"
+
+kafka_keytab = default('/configurations/kafka-env/kafka_keytab', None)
+kafka_principal_name = default('/configurations/kafka-env/kafka_principal_name', None)
+default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  kafka_env_sh_template = config['configurations']['kafka-env']['content']
+  kafka_home = os.path.join(stack_root,  "current", "kafka-broker")
+  kafka_conf_dir = os.path.join(kafka_home, "config")
+
+  kafka_zk_endpoint = default("/configurations/kafka-broker/zookeeper.connect", None)
+  kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
+                            ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
+                             (config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
+  if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
+    and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
+    _hostname_lowercase = config['hostname'].lower()
+    _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
+    kafka_jaas_principal = _kafka_principal_name.replace('_HOST', _hostname_lowercase)
+    kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+    kafka_kerberos_params = "-Djava.security.auth.login.config={0}/kafka_jaas.conf".format(kafka_conf_dir)
+  else:
+    kafka_kerberos_params = ''
+    kafka_jaas_principal = None
+    kafka_keytab_path = None
+
+namenode_host = set(default("/clusterHostInfo/namenode_host", []))
+has_namenode = not len(namenode_host) == 0
+
+# ranger atlas plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+retry_enabled = default("/commandParams/command_retry_enabled", False)
+
+stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+
+# Ranger supports the xml_configuration flag; instead of depending on xml_configurations_supported in ranger-env, use the stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ranger atlas plugin enabled property
+enable_ranger_atlas = default("/configurations/ranger-atlas-plugin-properties/ranger-atlas-plugin-enabled", "No")
+enable_ranger_atlas = enable_ranger_atlas.lower() == "yes"
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = enable_ranger_hbase.lower() == 'yes'
+
+if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
+  # for create_hdfs_directory
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']  if has_namenode else None
+  hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+  hdfs_site = config['configurations']['hdfs-site']
+  default_fs = config['configurations']['core-site']['fs.defaultFS']
+  dfs_type = default("/commandParams/dfs_type", "")
+
+  import functools
+  from resource_management.libraries.resources.hdfs_resource import HdfsResource
+  from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+  # Create a partial function that pre-binds the arguments common to every HdfsResource call;
+  # code that needs an HDFS directory then calls params.HdfsResource with only the specifics.
+
+  HdfsResource = functools.partial(
+    HdfsResource,
+    user = hdfs_user,
+    hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+    security_enabled = security_enabled,
+    keytab = hdfs_user_keytab,
+    kinit_path_local = kinit_path_local,
+    hadoop_bin_dir = hadoop_bin_dir,
+    hadoop_conf_dir = hadoop_conf_dir,
+    principal_name = hdfs_principal_name,
+    hdfs_site = hdfs_site,
+    default_fs = default_fs,
+    immutable_paths = get_not_managed_resources(),
+    dfs_type = dfs_type
+  )
+
+  # ranger atlas service/repository name
+  repo_name = str(config['clusterName']) + '_atlas'
+  repo_name_value = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  ssl_keystore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
+  ssl_truststore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+  xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
+
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  downloaded_custom_connector = None
+  driver_curl_source = None
+  driver_curl_target = None
+
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_atlas:
+    external_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
+  ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
+  ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
+  ranger_atlas_security = config['configurations']['ranger-atlas-security']
+  ranger_atlas_security_attrs = config['configuration_attributes']['ranger-atlas-security']
+  ranger_atlas_policymgr_ssl = config['configurations']['ranger-atlas-policymgr-ssl']
+  ranger_atlas_policymgr_ssl_attrs = config['configuration_attributes']['ranger-atlas-policymgr-ssl']
+
+  policy_user = config['configurations']['ranger-atlas-plugin-properties']['policy_user']
+
+  atlas_repository_configuration = {
+    'username' : config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+    'password' : unicode(config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+    'atlas.rest.address' : metadata_server_url,
+    'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
+    'ambari.service.check.user' : policy_user
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    atlas_repository_configuration.update(custom_ranger_service_config)
+
+  if security_enabled:
+    atlas_repository_configuration['policy.download.auth.users'] = metadata_user
+    atlas_repository_configuration['tag.download.auth.users'] = metadata_user
+
+  atlas_ranger_plugin_repo = {
+    'isEnabled': 'true',
+    'configs': atlas_repository_configuration,
+    'description': 'atlas repo',
+    'name': repo_name,
+    'type': 'atlas',
+  }
+# ranger atlas plugin section end
+# Atlas admin login credentials
+atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
+atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..cada8c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,55 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.exceptions import Fail
+
+class AtlasServiceCheck(Script):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}"),
+              user=params.smoke_test_user)
+    failed_host_count = 0
+
+    for atlas_host in params.atlas_hosts:
+      if params.security_enabled:
+        smoke_cmd = format('curl -k --negotiate -u : -b ~/cookiejar.txt -c ~/cookiejar.txt -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{atlas_host}:{metadata_port}/')
+      else:
+        smoke_cmd = format('curl -k -s -o /dev/null -w "%{{http_code}}" {metadata_protocol}://{atlas_host}:{metadata_port}/')
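+      # Rendered, the unsecured command resembles (host illustrative):
+      #   curl -k -s -o /dev/null -w "%{http_code}" http://atlas1.example.com:21000/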
+      try:
+        Execute(smoke_cmd, user=params.smoke_test_user, tries=5,
+                try_sleep=10)
+      except Exception, err:
+        failed_host_count += 1
+        Logger.error("ATLAS service check failed for host {0} with error {1}".format(atlas_host, err))
+    if failed_host_count == len(params.atlas_hosts):
+      raise Fail("All instances of ATLAS METADATA SERVER are down.")
+
+
+if __name__ == "__main__":
+  AtlasServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py
new file mode 100644
index 0000000..c47c75c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/setup_ranger_atlas.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_atlas(upgrade_type=None):
+  import params
+
+  if params.enable_ranger_atlas:
+
+    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retry_enabled:
+      Logger.info("ATLAS: Setup ranger: command retry is enabled, so setup will retry if Ranger admin is down.")
+    else:
+      Logger.info("ATLAS: Setup ranger: command retry is not enabled, so setup will be skipped if Ranger admin is down.")
+
+    if params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.metadata_user,
+                            group=params.user_group,
+                            mode=0755,
+                            recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/atlas",
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.metadata_user,
+                            group=params.user_group,
+                            mode=0700,
+                            recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+    setup_ranger_plugin('atlas-server', 'atlas', None,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.atlas_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_atlas, conf_dict=params.conf_dir,
+                        component_user=params.metadata_user, component_group=params.user_group, cache_service_list=['atlas'],
+                        plugin_audit_properties=params.config['configurations']['ranger-atlas-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-atlas-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-atlas-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-atlas-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-atlas-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-atlas-policymgr-ssl'],
+                        component_list=['atlas-server'], audit_db_is_enabled=False,
+                        credential_file=params.credential_file, xa_audit_db_password=None,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        api_version = 'v2', skip_if_rangeradmin_down = not params.retry_enabled, is_security_enabled = params.security_enabled,
+                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                        component_user_principal=params.atlas_jaas_principal if params.security_enabled else None,
+                        component_user_keytab=params.atlas_keytab_path if params.security_enabled else None)
+  else:
+    Logger.info('Ranger Atlas plugin is not enabled')
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..852a9cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_kinit_path, format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+default_conf_file = "application.properties"
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
+  default_conf_file = "atlas-application.properties"
+
+conf_file = default("/configurations/atlas-env/metadata_conf_file", default_conf_file)
+conf_dir = format("{stack_root}/current/atlas-server/conf")
+pid_dir = default("/configurations/atlas-env/metadata_pid_dir", "/var/run/atlas")
+pid_file = format("{pid_dir}/atlas.pid")
+
+metadata_user = default("/configurations/atlas-env/metadata_user", None)
+hbase_user = default("/configurations/hbase-env/hbase_user", None)
+kafka_user = default("/configurations/kafka-env/kafka_user", None)
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = default("/configurations/cluster-env/security_enabled", None)
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2
new file mode 100644
index 0000000..14167dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_hbase_setup.rb.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+_tbl_titan = '{{atlas_graph_storage_hbase_table}}'
+_tbl_audit = '{{atlas_audit_hbase_tablename}}'
+_usr_atlas = '{{metadata_user}}'
+
+
+if not list.include? _tbl_titan
+  begin
+    create _tbl_titan,
+      {NAME => 'e', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'},
+      {NAME => 'g', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'},
+      {NAME => 'i', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'},
+      {NAME => 's', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'},
+      {NAME => 'm', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW'},
+      {NAME => 'l', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION => 'GZ', BLOOMFILTER => 'ROW', TTL => 604800, KEEP_DELETED_CELLS => false}
+  rescue RuntimeError => e
+    raise e if not e.message.include? "Table already exists"
+  end
+end
+
+
+if not list.include? _tbl_audit
+  begin
+    create _tbl_audit, {NAME => 'dt', DATA_BLOCK_ENCODING => 'FAST_DIFF', COMPRESSION =>'GZ', BLOOMFILTER =>'ROW'}
+  rescue RuntimeError => e
+    raise e if not e.message.include? "Table already exists"
+  end
+end
+
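+# 'RWCA' grants the Atlas user read, write, create and admin permissions on both tables.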
+grant _usr_atlas, 'RWCA', _tbl_titan
+grant _usr_atlas, 'RWCA', _tbl_audit
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2
new file mode 100644
index 0000000..68eb088
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   useTicketCache=false
+   storeKey=true
+   doNotPrompt=false
+   keyTab="{{atlas_keytab_path}}"
+   principal="{{atlas_jaas_principal}}";
+};
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2
new file mode 100644
index 0000000..6a2edc6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/atlas_kafka_acl.sh.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+#!/bin/bash
+
+
+create_topic() {
+    topic_name=$1
+    topics=`{{kafka_home}}/bin/kafka-topics.sh --zookeeper {{kafka_zk_endpoint}} --topic $topic_name --list`
+    if [ -z "$topics" ]; then
+      {{kafka_home}}/bin/kafka-topics.sh --zookeeper {{kafka_zk_endpoint}} --topic $topic_name --create --partitions 1 --replication-factor {{default_replication_factor}}
+      echo "Created topic $topic_name with replication factor {{default_replication_factor}}"
+    else
+      echo "Topic $topic_name already exists"
+    fi
+}
+
+create_topic ATLAS_HOOK
+create_topic ATLAS_ENTITIES
+
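+# Hooks in other services publish to ATLAS_HOOK, so any principal may produce to it,
+# while only the Atlas user may consume; Atlas itself produces to ATLAS_ENTITIES.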
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add  --topic ATLAS_HOOK --allow-principal User:* --producer
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add  --topic ATLAS_HOOK --allow-principal User:{{metadata_user}} --consumer --group atlas
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add  --topic ATLAS_ENTITIES --allow-principal User:{{metadata_user}} --producer
+
+{% if has_ranger_tagsync %}
+{{kafka_home}}/bin/kafka-acls.sh --authorizer-properties zookeeper.connect={{kafka_zk_endpoint}} --add  --topic ATLAS_ENTITIES --allow-principal User:{{rangertagsync_user}} --consumer --group ranger_entities_consumer
+{% endif %}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2
new file mode 100644
index 0000000..2d977b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/input.config-atlas.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"atlas_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/atlas-env/metadata_log_dir', '/var/log/atlas')}}/application.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "atlas_app"
+          ]
+        }
+      },
+      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2
new file mode 100644
index 0000000..56c558d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/package/templates/kafka_jaas.conf.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+KafkaServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{kafka_jaas_principal}}";
+};
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{kafka_jaas_principal}}";
+};
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..0a7d0a0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,36 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"atlas.enableTLS",
+          "desired":"true",
+          "site":"application-properties"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "atlas_dashboard",
+        "label": "Atlas Dashboard",
+        "requires_user_name": "true",
+        "component_name": "ATLAS_SERVER",
+        "url": "%@://%@:%@/",
+        "attributes": ["authenticated", "sso"],
+        "port":{
+          "http_property": "atlas.server.http.port",
+          "http_default_port": "21000",
+          "https_property": "atlas.server.https.port",
+          "https_default_port": "21443",
+          "regex": "^(\\d+)$",
+          "site": "application-properties"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json
new file mode 100644
index 0000000..4d66dfc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for ATLAS",
+    "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
+    "ATLAS_SERVER-START": ["KAFKA_BROKER-START", "INFRA_SOLR-START", "HBASE_MASTER-START", "HBASE_REGIONSERVER-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
new file mode 100644
index 0000000..a2e31cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
@@ -0,0 +1,441 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+from ambari_commons.str_utils import string_set_equals
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class AtlasServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AtlasServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    host index where component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = AtlasRecommender()
+    recommender.recommendAtlasConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+    recommender.recommendAtlasConfigurationsFromHDP26(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = AtlasValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class AtlasRecommender(service_advisor.ServiceAdvisor):
+  """
+  Atlas Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AtlasRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def constructAtlasRestAddress(self, services, hosts):
+    """
+    :param services: Collection of services in the cluster with configs
+    :param hosts: Collection of hosts in the cluster
+    :return: The suggested property for atlas.rest.address if it is valid, otherwise, None
+    """
+    atlas_rest_address = None
+    services_list = [service["StackServices"]["service_name"] for service in services["services"]]
+    is_atlas_in_cluster = "ATLAS" in services_list
+
+    atlas_server_hosts_info = self.getHostsWithComponent("ATLAS", "ATLAS_SERVER", services, hosts)
+    if is_atlas_in_cluster and atlas_server_hosts_info:
+      # Multiple Atlas Servers can exist, so sort by hostname to create a deterministic csv
+      atlas_host_names = sorted(e['Hosts']['host_name'] for e in atlas_server_hosts_info)
+
+      scheme = "http"
+      metadata_port = "21000"
+      atlas_server_default_https_port = "21443"
+      tls_enabled = "false"
+      if 'application-properties' in services['configurations']:
+        if 'atlas.enableTLS' in services['configurations']['application-properties']['properties']:
+          tls_enabled = services['configurations']['application-properties']['properties']['atlas.enableTLS']
+        if 'atlas.server.http.port' in services['configurations']['application-properties']['properties']:
+          metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.http.port'])
+
+        if str(tls_enabled).lower() == "true":
+          scheme = "https"
+          if 'atlas.server.https.port' in services['configurations']['application-properties']['properties']:
+            metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.https.port'])
+          else:
+            metadata_port = atlas_server_default_https_port
+
+      atlas_rest_address_list = ["{0}://{1}:{2}".format(scheme, hostname, metadata_port) for hostname in atlas_host_names]
+      atlas_rest_address = ",".join(atlas_rest_address_list)
+      self.logger.info("Constructing atlas.rest.address=%s" % atlas_rest_address)
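+    # For illustration: with two hypothetical Atlas hosts and TLS disabled, the value
+    # resembles "http://atlas1.example.com:21000,http://atlas2.example.com:21000".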
+    return atlas_rest_address
+
+  def recommendAtlasConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    putAtlasApplicationProperty = self.putProperty(configurations, "application-properties", services)
+    putAtlasRangerPluginProperty = self.putProperty(configurations, "ranger-atlas-plugin-properties", services)
+    putAtlasEnvProperty = self.putProperty(configurations, "atlas-env", services)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    # Generate atlas.rest.address since the value is always computed
+    atlas_rest_address = self.constructAtlasRestAddress(services, hosts)
+    if atlas_rest_address is not None:
+      putAtlasApplicationProperty("atlas.rest.address", atlas_rest_address)
+
+    if "AMBARI_INFRA" in servicesList and 'infra-solr-env' in services['configurations']:
+      if 'infra_solr_znode' in services['configurations']['infra-solr-env']['properties']:
+        infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode']
+      else:
+        infra_solr_znode = None
+
+      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
+      zookeeper_host_arr = []
+
+      zookeeper_port = self.getZKPort(services)
+      for i in range(len(zookeeper_hosts)):
+        zookeeper_host = zookeeper_hosts[i] + ':' + zookeeper_port
+        if infra_solr_znode is not None:
+          zookeeper_host += infra_solr_znode
+        zookeeper_host_arr.append(zookeeper_host)
+
+      solr_zookeeper_url = ",".join(zookeeper_host_arr)
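+      # e.g. "zk1.example.com:2181/infra-solr,zk2.example.com:2181/infra-solr" (hosts and znode illustrative)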
+
+      putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', solr_zookeeper_url)
+    else:
+      putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', "")
+
+    # Kafka section
+    if "KAFKA" in servicesList and 'kafka-broker' in services['configurations']:
+      kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", services)
+
+      if 'port' in services['configurations']['kafka-broker']['properties']:
+        kafka_broker_port = services['configurations']['kafka-broker']['properties']['port']
+      else:
+        kafka_broker_port = '6667'
+
+      if 'kafka-broker' in services['configurations'] and 'listeners' in services['configurations']['kafka-broker']['properties']:
+        kafka_server_listeners = services['configurations']['kafka-broker']['properties']['listeners']
+      else:
+        kafka_server_listeners = 'PLAINTEXT://localhost:6667'
+
+      security_enabled = self.isSecurityEnabled(services)
+
+      if ',' in kafka_server_listeners and len(kafka_server_listeners.split(',')) > 1:
+        for listener in kafka_server_listeners.split(','):
+          listener = listener.strip().split(':')
+          if len(listener) == 3:
+            if 'SASL' in listener[0] and security_enabled:
+              kafka_broker_port = listener[2]
+              break
+            elif 'SASL' not in listener[0] and not security_enabled:
+              kafka_broker_port = listener[2]
+      else:
+        listener = kafka_server_listeners.strip().split(':')
+        if len(listener) == 3:
+          kafka_broker_port = listener[2]
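+      # e.g. listeners "SASL_PLAINTEXT://localhost:6668" yields broker port 6668 on a secured cluster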
+
+      kafka_host_arr = []
+      for i in range(len(kafka_hosts)):
+        kafka_host_arr.append(kafka_hosts[i] + ':' + kafka_broker_port)
+
+      kafka_bootstrap_servers = ",".join(kafka_host_arr)
+
+      if 'zookeeper.connect' in services['configurations']['kafka-broker']['properties']:
+        kafka_zookeeper_connect = services['configurations']['kafka-broker']['properties']['zookeeper.connect']
+      else:
+        kafka_zookeeper_connect = None
+
+      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', kafka_bootstrap_servers)
+      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', kafka_zookeeper_connect)
+    else:
+      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', "")
+      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', "")
+
+    if "HBASE" in servicesList and 'hbase-site' in services['configurations']:
+      if 'hbase.zookeeper.quorum' in services['configurations']['hbase-site']['properties']:
+        hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+      else:
+        hbase_zookeeper_quorum = ""
+
+      putAtlasApplicationProperty('atlas.graph.storage.hostname', hbase_zookeeper_quorum)
+      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', hbase_zookeeper_quorum)
+    else:
+      putAtlasApplicationProperty('atlas.graph.storage.hostname', "")
+      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', "")
+
+    if "ranger-env" in services["configurations"] and "ranger-atlas-plugin-properties" in services["configurations"] and \
+        "ranger-atlas-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      ranger_atlas_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-atlas-plugin-enabled"]
+      putAtlasRangerPluginProperty('ranger-atlas-plugin-enabled', ranger_atlas_plugin_enabled)
+
+    ranger_atlas_plugin_enabled = ''
+    if 'ranger-atlas-plugin-properties' in configurations and 'ranger-atlas-plugin-enabled' in configurations['ranger-atlas-plugin-properties']['properties']:
+      ranger_atlas_plugin_enabled = configurations['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
+    elif 'ranger-atlas-plugin-properties' in services['configurations'] and 'ranger-atlas-plugin-enabled' in services['configurations']['ranger-atlas-plugin-properties']['properties']:
+      ranger_atlas_plugin_enabled = services['configurations']['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
+
+    if ranger_atlas_plugin_enabled and ranger_atlas_plugin_enabled.lower() == 'yes':
+      putAtlasApplicationProperty('atlas.authorizer.impl','ranger')
+    else:
+      putAtlasApplicationProperty('atlas.authorizer.impl','simple')
+
+    #atlas server memory settings
+    if 'atlas-env' in services['configurations']:
+      atlas_server_metadata_size = 50000
+      if 'atlas_server_metadata_size' in services['configurations']['atlas-env']['properties']:
+        atlas_server_metadata_size = float(services['configurations']['atlas-env']['properties']['atlas_server_metadata_size'])
+
+      atlas_server_xmx = 2048
+
+      if 300000 <= atlas_server_metadata_size < 500000:
+        atlas_server_xmx = 1024 * 5
+      elif 500000 <= atlas_server_metadata_size < 1000000:
+        atlas_server_xmx = 1024 * 10
+      elif atlas_server_metadata_size >= 1000000:
+        atlas_server_xmx = 1024 * 16
+
+      atlas_server_max_new_size = (atlas_server_xmx / 100) * 30
+
+      putAtlasEnvProperty("atlas_server_xmx", atlas_server_xmx)
+      putAtlasEnvProperty("atlas_server_max_new_size", atlas_server_max_new_size)
+
+
+  def recommendAtlasConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putAtlasApplicationProperty = self.putProperty(configurations, "application-properties", services)
+
+    knox_host = 'localhost'
+    knox_port = '8443'
+    if 'KNOX' in servicesList:
+      knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
+      if len(knox_hosts) > 0:
+        knox_hosts.sort()
+        knox_host = knox_hosts[0]
+      if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
+        knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
+      putAtlasApplicationProperty('atlas.sso.knox.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
+
+
+
+
+class AtlasValidator(service_advisor.ServiceAdvisor):
+  """
+  Atlas Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AtlasValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("application-properties", self.validateAtlasConfigurationsFromHDP25)]
+
+
+
+  def validateAtlasConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
+    application_properties = self.getSiteProperties(configurations, "application-properties")
+    validationItems = []
+
+    auth_type = application_properties['atlas.authentication.method.ldap.type']
+    auth_ldap_enable = application_properties['atlas.authentication.method.ldap'].lower() == 'true'
+    self.logger.info("Validating Atlas configs, authentication type: %s" % str(auth_type))
+
+    # Required props
+    ldap_props = {"atlas.authentication.method.ldap.url": "",
+                  "atlas.authentication.method.ldap.userDNpattern": "uid=",
+                  "atlas.authentication.method.ldap.groupSearchBase": "",
+                  "atlas.authentication.method.ldap.groupSearchFilter": "",
+                  "atlas.authentication.method.ldap.groupRoleAttribute": "cn",
+                  "atlas.authentication.method.ldap.base.dn": "",
+                  "atlas.authentication.method.ldap.bind.dn": "",
+                  "atlas.authentication.method.ldap.bind.password": "",
+                  "atlas.authentication.method.ldap.user.searchfilter": ""
+    }
+    ad_props = {"atlas.authentication.method.ldap.ad.domain": "",
+                "atlas.authentication.method.ldap.ad.url": "",
+                "atlas.authentication.method.ldap.ad.base.dn": "",
+                "atlas.authentication.method.ldap.ad.bind.dn": "",
+                "atlas.authentication.method.ldap.ad.bind.password": "",
+                "atlas.authentication.method.ldap.ad.user.searchfilter": "(sAMAccountName={0})"
+    }
+
+    props_to_require = set()
+    if auth_type.lower() == "ldap":
+      props_to_require = set(ldap_props.keys())
+    elif auth_type.lower() == "ad":
+      props_to_require = set(ad_props.keys())
+    elif auth_type.lower() == "none":
+      pass
+
+    if auth_ldap_enable:
+      for prop in props_to_require:
+        if prop not in application_properties or application_properties[prop] is None or application_properties[prop].strip() == "":
+          validationItems.append({"config-name": prop,
+                                  "item": self.getErrorItem("If authentication type is %s, this property is required." % auth_type)})
+
+    if application_properties.get('atlas.graph.index.search.backend') == 'solr5' and \
+            not application_properties.get('atlas.graph.index.search.solr.zookeeper-url'):
+      validationItems.append({"config-name": "atlas.graph.index.search.solr.zookeeper-url",
+                              "item": self.getErrorItem(
+                                "If AMBARI_INFRA is not installed then the SOLR zookeeper url configuration must be specified.")})
+
+    if not application_properties.get('atlas.kafka.bootstrap.servers'):
+      validationItems.append({"config-name": "atlas.kafka.bootstrap.servers",
+                              "item": self.getErrorItem(
+                                "If KAFKA is not installed then the Kafka bootstrap servers configuration must be specified.")})
+
+    if not application_properties.get('atlas.kafka.zookeeper.connect'):
+      validationItems.append({"config-name": "atlas.kafka.zookeeper.connect",
+                              "item": self.getErrorItem(
+                                "If KAFKA is not installed then the Kafka zookeeper quorum configuration must be specified.")})
+
+    if application_properties.get('atlas.graph.storage.backend') == 'hbase' and 'hbase-site' in services['configurations']:
+      hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+
+      if not application_properties.get('atlas.graph.storage.hostname'):
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getErrorItem(
+                                  "If HBASE is not installed then the hbase zookeeper quorum configuration must be specified.")})
+      elif string_set_equals(application_properties['atlas.graph.storage.hostname'], hbase_zookeeper_quorum):
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getWarnItem(
+                                  "Atlas is configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
+
+      if not application_properties.get('atlas.audit.hbase.zookeeper.quorum'):
+        validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
+                                "item": self.getErrorItem(
+                                  "If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
+
+    elif application_properties.get('atlas.graph.storage.backend') == 'hbase' and 'hbase-site' not in services['configurations']:
+      if not application_properties.get('atlas.graph.storage.hostname'):
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getErrorItem(
+                                  "Atlas is not configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
+      if not application_properties.get('atlas.audit.hbase.zookeeper.quorum'):
+        validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
+                                "item": self.getErrorItem(
+                                  "If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "application-properties")
+    return validationProblems
+
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme.json
new file mode 100644
index 0000000..da3c79f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme.json
@@ -0,0 +1,619 @@
+{
+  "name": "default",
+  "description": "Default theme for Atlas service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "authentication_settings",
+            "display-name": "Authentication",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-authentication-type",
+                  "display-name": "Authentication Type",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-authentication-type",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-authentication",
+                  "display-name": "LDAP/AD",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-authentication",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.type",
+          "subsection-name": "subsection-authentication-type"
+        },
+
+
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.url",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.userDNpattern",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupSearchBase",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupSearchFilter",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupRoleAttribute",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.base.dn",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.bind.dn",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.bind.password",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.referral",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.user.searchfilter",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.default.role",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+
+
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.url",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.domain",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.base.dn",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.bind.dn",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.bind.password",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.referral",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.user.searchfilter",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.default.role",
+          "subsection-name": "subsection-authentication",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.type",
+        "widget":{
+          "type":"combo"
+        }
+      },
+
+
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.url",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.userDNpattern",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupSearchBase",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupSearchFilter",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupRoleAttribute",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.base.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.bind.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.bind.password",
+        "widget":{
+          "type":"password"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.referral",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.user.searchfilter",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.default.role",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+
+
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.url",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.domain",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.base.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.bind.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.bind.password",
+        "widget":{
+          "type":"password"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.referral",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.user.searchfilter",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.default.role",
+        "widget":{
+          "type":"text-field"
+        }
+      }
+    ]
+  }
+}
+
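The placement list above repeats one visibility rule per LDAP/AD property: each field is shown only when atlas.authentication.method.ldap.type matches the expected value. A small hypothetical generator (not part of Ambari) makes the repeated depends-on stanza explicit; only the config name and the expected type vary between entries.

    import json

    TYPE_CONFIG = "application-properties/atlas.authentication.method.ldap.type"

    def placement_entry(config, auth_type):
        """Build one placement stanza: visible only when the LDAP type matches."""
        return {
            "config": "application-properties/" + config,
            "subsection-name": "subsection-authentication",
            "depends-on": [{
                "configs": [TYPE_CONFIG],
                "if": "${%s} === %s" % (TYPE_CONFIG, auth_type),
                "then": {"property_value_attributes": {"visible": True}},
                "else": {"property_value_attributes": {"visible": False}},
            }],
        }

    # Emits the same JSON shape as the theme entries above.
    print(json.dumps(placement_entry("atlas.authentication.method.ldap.url", "ldap"), indent=2))
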
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme_version_2.json b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme_version_2.json
new file mode 100644
index 0000000..74d0b4e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/themes/theme_version_2.json
@@ -0,0 +1,845 @@
+{
+  "name": "default",
+  "description": "Default theme for Atlas service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "authentication_settings",
+            "display-name": "Authentication",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "4",
+              "sections": [
+                {
+                  "name": "section-authentication-type",
+                  "display-name": "Authentication Methods",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-authentication-type",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-authentication-file",
+                  "display-name": "File",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-authentication-file",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-authentication-ldap",
+                  "display-name": "LDAP/AD",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-authentication-ldap",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "application-properties/atlas.authentication.method.ldap"
+                          ],
+                          "if": "${application-properties/atlas.authentication.method.ldap}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    }
+                  ]
+                },
+                {
+                  "name": "section-atlas-sso",
+                  "display-name": "Atlas Knox SSO",
+                  "row-index": "3",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-atlas-sso",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "application-properties/atlas.authentication.method.file",
+          "subsection-name": "subsection-authentication-type"
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.file.filename",
+          "subsection-name": "subsection-authentication-file",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.file"
+              ],
+              "if": "${application-properties/atlas.authentication.method.file}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap",
+          "subsection-name": "subsection-authentication-type"
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.type",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.url",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.userDNpattern",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap} ",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupSearchBase",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupSearchFilter",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.groupRoleAttribute",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.base.dn",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.bind.dn",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.bind.password",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.referral",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.user.searchfilter",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.default.role",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ldap && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.url",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.domain",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.base.dn",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.bind.dn",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.bind.password",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.referral",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.user.searchfilter",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.authentication.method.ldap.ad.default.role",
+          "subsection-name": "subsection-authentication-ldap",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.authentication.method.ldap.type",
+                "application-properties/atlas.authentication.method.ldap"
+              ],
+              "if": "${application-properties/atlas.authentication.method.ldap.type} === ad && ${application-properties/atlas.authentication.method.ldap}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.sso.knox.enabled",
+          "subsection-name": "subsection-authentication-type"
+        },
+        {
+          "config": "application-properties/atlas.sso.knox.providerurl",
+          "subsection-name": "subsection-atlas-sso",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.sso.knox.enabled"
+              ],
+              "if": "${application-properties/atlas.sso.knox.enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.sso.knox.publicKey",
+          "subsection-name": "subsection-atlas-sso",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.sso.knox.enabled"
+              ],
+              "if": "${application-properties/atlas.sso.knox.enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "application-properties/atlas.sso.knox.browser.useragent",
+          "subsection-name": "subsection-atlas-sso",
+          "depends-on": [
+            {
+              "configs":[
+                "application-properties/atlas.sso.knox.enabled"
+              ],
+              "if": "${application-properties/atlas.sso.knox.enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "application-properties/atlas.authentication.method.file",
+        "widget": {
+          "type": "checkbox"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.file.filename",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap",
+        "widget": {
+          "type": "checkbox"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.url",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.userDNpattern",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupSearchBase",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupSearchFilter",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.groupRoleAttribute",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.base.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.bind.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.bind.password",
+        "widget":{
+          "type":"password"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.referral",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.user.searchfilter",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.default.role",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.type",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.url",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.domain",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config":"application-properties/atlas.authentication.method.ldap.ad.base.dn",
+        "widget":{
+          "type":"text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.ad.bind.dn",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.ad.bind.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.ad.referral",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.ad.user.searchfilter",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.authentication.method.ldap.ad.default.role",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.sso.knox.enabled",
+        "widget": {
+          "type":"checkbox"
+        }
+      },
+      {
+        "config": "application-properties/atlas.sso.knox.providerurl",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "application-properties/atlas.sso.knox.publicKey",
+        "widget": {
+          "type": "text-area"
+        }
+      },
+      {
+        "config": "application-properties/atlas.sso.knox.browser.useragent",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
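Unlike the first theme, theme_version_2.json chains two conditions in its "if" expressions (type === ldap && the LDAP enable flag). The real expressions are evaluated by Ambari's web UI; the toy evaluator below is only a sketch of their apparent semantics, under the assumption that ${...} references are substituted with current property values, === compares tokens, and a bare reference acts as a boolean flag.

    import re

    def evaluate_if(expr, props):
        """Evaluate a theme 'if' expression against current property values (sketch)."""
        # Substitute ${config-type/property} references with their current values.
        expr = re.sub(r"\$\{([^}]+)\}", lambda m: props.get(m.group(1), "false"), expr)
        def term_is_true(term):
            term = term.strip()
            if "===" in term:
                left, right = (s.strip() for s in term.split("==="))
                return left == right
            return term.lower() == "true"  # a bare reference is a boolean flag
        return all(term_is_true(t) for t in expr.split("&&"))

    props = {
        "application-properties/atlas.authentication.method.ldap.type": "ldap",
        "application-properties/atlas.authentication.method.ldap": "true",
    }
    expr = ("${application-properties/atlas.authentication.method.ldap.type} === ldap"
            " && ${application-properties/atlas.authentication.method.ldap}")
    print(evaluate_if(expr, props))  # True -> the LDAP fields are shown
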
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-logsearch-conf.xml
deleted file mode 100644
index 6b43ba6..0000000
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Falcon</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>FALCON_SERVER:falcon_app</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"falcon_app",
-      "rowtype":"service",
-      "path":"{{default('/configurations/falcon-env/falcon_log_dir', '/var/log/falcon')}}/falcon.application.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "falcon_app"
-          ]
-         }
-       },
-      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
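
The Logfeeder input/filter JSON that used to live inline in these *-logsearch-conf.xml values reappears below as standalone input.config-*.json.j2 templates with the same grok patterns. As a rough illustration of what the Falcon message_pattern extracts, here is a Python sketch with the %{...} macros hand-expanded into named groups (an approximation only; the exact macro definitions ship with Logfeeder):

import re

# Approximate expansion of the grok macros in message_pattern above;
# the real grok dictionary may accept more timestamp variants.
FALCON_LINE = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+-\s+\[(?P<thread_name>.*?)\]\s+~\s+"
    r"(?P<log_message>.*)$",
    re.DOTALL)

sample = "2017-03-01 10:15:42,123 INFO  - [main:] ~ Falcon server started (Main:42)"
match = FALCON_LINE.match(sample)
if match:
    print(match.groupdict())  # logtime, level, thread_name, log_message
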
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
index 6d1dbc5..b0f517b 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
@@ -59,16 +59,6 @@
     conf_select.select(params.stack_name, "falcon", params.version)
     stack_select.select("falcon-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class FalconClientWindows(FalconClient):
   def install(self, env):
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
index c4960a7..23f9ef8 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
@@ -89,65 +89,6 @@
 
     falcon_server_upgrade.pre_start_restore()
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"*.falcon.authentication.type": "kerberos",
-                           "*.falcon.http.authentication.type": "kerberos"}
-      props_empty_check = ["*.falcon.service.authentication.kerberos.principal",
-                           "*.falcon.service.authentication.kerberos.keytab",
-                           "*.falcon.http.authentication.kerberos.principal",
-                           "*.falcon.http.authentication.kerberos.keytab"]
-      props_read_check = ["*.falcon.service.authentication.kerberos.keytab",
-                          "*.falcon.http.authentication.kerberos.keytab"]
-      falcon_startup_props = build_expectations('startup', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      falcon_expectations ={}
-      falcon_expectations.update(falcon_startup_props)
-
-      security_params = get_params_from_filesystem('/etc/falcon/conf',
-                                                   {'startup.properties': FILE_TYPE_PROPERTIES})
-      result_issues = validate_security_config_properties(security_params, falcon_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'startup' not in security_params
-               or '*.falcon.service.authentication.kerberos.keytab' not in security_params['startup']
-               or '*.falcon.service.authentication.kerberos.principal' not in security_params['startup']) \
-            or '*.falcon.http.authentication.kerberos.keytab' not in security_params['startup'] \
-            or '*.falcon.http.authentication.kerberos.principal' not in security_params['startup']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.falcon_user,
-                                security_params['startup']['*.falcon.service.authentication.kerberos.keytab'],
-                                security_params['startup']['*.falcon.service.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.falcon_user,
-                                security_params['startup']['*.falcon.http.authentication.kerberos.keytab'],
-                                security_params['startup']['*.falcon.http.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.falcon_log_dir
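
This change drops security_status() from the Falcon scripts, and the HBase scripts below lose the same method. All of the deleted implementations shared one shape: verify that the kerberos keytab/principal properties are present, then attempt a kinit with them. A condensed, hypothetical sketch of that shape, with kinit standing in for resource_management's cached_kinit_executor:

def security_status_sketch(security_enabled, security_params, kinit):
    # Condensed shape of the deleted checks; 'kinit' is a stand-in with an
    # assumed (keytab, principal) signature, not the real executor API.
    if not security_enabled:
        return {"securityState": "UNSECURED"}
    startup = security_params.get("startup", {})
    pairs = [("*.falcon.service.authentication.kerberos.keytab",
              "*.falcon.service.authentication.kerberos.principal"),
             ("*.falcon.http.authentication.kerberos.keytab",
              "*.falcon.http.authentication.kerberos.principal")]
    if any(key not in startup for pair in pairs for key in pair):
        return {"securityState": "UNSECURED",
                "securityIssuesFound": "Keytab file or principal are not set properly."}
    try:
        for keytab_key, principal_key in pairs:
            kinit(startup[keytab_key], startup[principal_key])
        return {"securityState": "SECURED_KERBEROS"}
    except Exception as e:
        return {"securityState": "ERROR", "securityStateErrorInfo": str(e)}
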
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/templates/input.config-falcon.json.j2 b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/templates/input.config-falcon.json.j2
new file mode 100644
index 0000000..7c5aede
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/templates/input.config-falcon.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"falcon_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/falcon-env/falcon_log_dir', '/var/log/falcon')}}/falcon.application.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "falcon_app"
+          ]
+        }
+      },
+      "log4j_format":"%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{SPACE}-%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}~%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-logsearch-conf.xml
deleted file mode 100644
index 98e6db8..0000000
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Flume</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>FLUME:flume_handler</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"flume_handler",
-      "rowtype":"service",
-      "path":"{{default('/configurations/flume-env/flume_log_dir', '/var/log/flume')}}/flume.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "flume_handler"
-          ]
-         }
-       },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{USER_SYNC_DATE:logtime})",
-      "message_pattern":"(?m)^%{USER_SYNC_DATE:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}\\(%{JAVACLASS:class}\\.%{JAVAMETHOD:method}:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"dd MMM yyyy HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index b143941..0e0c9aa 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -90,18 +90,23 @@
 if config.has_key('hostname'):
   hostname = config['hostname']
 
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+set_instanceId = "false"
+cluster_name = config["clusterName"]
+
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
 has_metric_collector = not len(ams_collector_hosts) == 0
 metric_collector_port = None
 if has_metric_collector:
+  metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-  else:
-    metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -119,6 +124,9 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if not len(default("/clusterHostInfo/zookeeper_hosts", [])) == 0:
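
The sink parameters switch from the old metrics_collector_vip_host/port overrides to metrics_collector_external_hosts/port in cluster-env, and now also export set_instanceId and cluster_name; the same edit recurs for HBase further down. A sketch of the resulting resolution order, with plain dicts standing in for Ambari's config tree and default() helper, and select_one standing in for select_metric_collector_hosts_from_hostnames:

def resolve_collector(cluster_env, internal_hosts, ams_site, select_one):
    # Mirrors the branch order above: external hosts win, else the
    # clusterHostInfo list; external port wins, else the webapp address.
    if "metrics_collector_external_hosts" in cluster_env:
        hosts, set_instance_id = cluster_env["metrics_collector_external_hosts"], "true"
    else:
        hosts, set_instance_id = ",".join(internal_hosts), "false"
    if not hosts:
        return None, None, set_instance_id  # no metrics collector in the cluster
    host = select_one(hosts)
    if "metrics_collector_external_port" in cluster_env:
        port = cluster_env["metrics_collector_external_port"]
    else:
        webapp = ams_site.get("timeline.metrics.service.webapp.address", "0.0.0.0:6188")
        port = webapp.split(":")[1] if ":" in webapp else "6188"
    return host, port, set_instance_id

host, port, set_instance_id = resolve_collector(
    {}, ["c6401.ambari.apache.org"], {}, lambda hosts: hosts.split(",")[0])
print(host, port, set_instance_id)  # c6401.ambari.apache.org 6188 false
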
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index cf2bd6c..c476019 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -23,6 +23,8 @@
 collectionFrequency={{metrics_collection_period}}000
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/input.config-flume.json.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/input.config-flume.json.j2
new file mode 100644
index 0000000..12d7aad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/input.config-flume.json.j2
@@ -0,0 +1,53 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{# flume.conf: Add your flume configuration here and start flume
+#             Note that if you are using the Windows service or Unix service
+#             provided by the HDP distribution, they will assume the
+#             agent's name in this file to be 'a1'
+#}
+{
+  "input":[
+    {
+      "type":"flume_handler",
+      "rowtype":"service",
+      "path":"{{default('/configurations/flume-env/flume_log_dir', '/var/log/flume')}}/flume.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "flume_handler"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{USER_SYNC_DATE:logtime})",
+      "message_pattern":"(?m)^%{USER_SYNC_DATE:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}\\(%{JAVACLASS:class}\\.%{JAVAMETHOD:method}:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"dd MMM yyyy HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
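
Both new templates resolve their log directory with the jinja default() helper, which walks a '/'-separated key path into the cluster configuration and falls back when any segment is missing. A minimal stand-in for that lookup (the real helper is supplied by the agent-side template context):

def default(path, fallback, configurations):
    # Walk the '/'-separated path through nested dicts; fall back if absent.
    node = configurations
    for part in path.strip("/").split("/"):
        if not isinstance(node, dict) or part not in node:
            return fallback
        node = node[part]
    return node

configs = {"configurations": {"flume-env": {"flume_log_dir": "/grid/0/log/flume"}}}
print(default("/configurations/flume-env/flume_log_dir", "/var/log/flume", configs))
# -> /grid/0/log/flume
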
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-logsearch-conf.xml
deleted file mode 100644
index 321ea4e..0000000
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-logsearch-conf.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>HBase</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>HBASE_MASTER:hbase_master;HBASE_REGIONSERVER:hbase_regionserver;PHOENIX_QUERY_SERVER:hbase_phoenix_server</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hbase_master",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
-    },
-    {
-      "type":"hbase_regionserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
-    },
-    {
-      "type":"hbase_phoenix_server",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hbase_master",
-            "hbase_regionserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hbase_phoenix_server"
-          ]
-         }
-      },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index d2c8089..83af3aa 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -98,55 +98,6 @@
 
     check_process_status(status_params.hbase_master_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hbase.security.authentication" : "kerberos",
-                           "hbase.security.authorization": "true"}
-      props_empty_check = ['hbase.master.keytab.file',
-                           'hbase.master.kerberos.principal']
-      props_read_check = ['hbase.master.keytab.file']
-      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      hbase_expectations = {}
-      hbase_expectations.update(hbase_site_expectations)
-
-      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
-                                                   {'hbase-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hbase_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hbase-site' not in security_params
-               or 'hbase.master.keytab.file' not in security_params['hbase-site']
-               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hbase_user,
-                                security_params['hbase-site']['hbase.master.keytab.file'],
-                                security_params['hbase-site']['hbase.master.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.log_dir
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
index 226e7fd5..75910b1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
@@ -109,55 +109,6 @@
 
     check_process_status(status_params.regionserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hbase.security.authentication" : "kerberos",
-                           "hbase.security.authorization": "true"}
-      props_empty_check = ['hbase.regionserver.keytab.file',
-                           'hbase.regionserver.kerberos.principal']
-      props_read_check = ['hbase.regionserver.keytab.file']
-      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
-                                                   props_read_check)
-
-      hbase_expectations = {}
-      hbase_expectations.update(hbase_site_expectations)
-
-      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
-                                                   {'hbase-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hbase_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hbase-site' not in security_params
-               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
-               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hbase_user,
-                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
-                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.log_dir
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 3177643..d45aea6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -152,12 +152,20 @@
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+set_instanceId = "false"
+cluster_name = config["clusterName"]
+
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -176,6 +184,9 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
index 77820cc..8a85d6e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
@@ -71,10 +71,6 @@
     import status_params
     env.set_params(status_params)
     phoenix_service('status')
-
-
-  def security_status(self, env):
-    self.put_structured_out({"securityState": "UNSECURED"})
     
   def get_log_folder(self):
     import params
@@ -89,4 +85,4 @@
     return [status_params.phoenix_pid_file]
 
 if __name__ == "__main__":
-  PhoenixQueryServer().execute()
\ No newline at end of file
+  PhoenixQueryServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index e808f5d..7368ffe 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -76,6 +76,8 @@
 hbase.sink.timeline.collector.hosts={{ams_collector_hosts}}
 hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index 7e1abbc..f245365 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -74,7 +74,8 @@
 hbase.sink.timeline.collector.hosts={{ams_collector_hosts}}
 hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
-
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/input.config-hbase.json.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/input.config-hbase.json.j2
new file mode 100644
index 0000000..94fbc64
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/input.config-hbase.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hbase_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
+    },
+    {
+      "type":"hbase_regionserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
+    },
+    {
+      "type":"hbase_phoenix_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_master",
+            "hbase_regionserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_phoenix_server"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json
new file mode 100644
index 0000000..6fcb4dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/alerts.json
@@ -0,0 +1,127 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Master CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010,
+            "connection_timeout": 5.0,
+            "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}",
+            "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
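
The hbase_regionserver_process_percent entry is an AGGREGATE alert: the server rolls up the per-host hbase_regionserver_process results and compares the affected percentage against the 10/30 thresholds above. An illustrative sketch of that evaluation (the comparison direction is an assumption; the authoritative logic lives in Ambari's alert framework):

def aggregate_state(affected, total, warning_pct=10, critical_pct=30):
    # Compute the affected percentage and map it onto the alert thresholds.
    pct = 100.0 * affected / total if total else 0.0
    if pct >= critical_pct:
        state = "CRITICAL"
    elif pct >= warning_pct:
        state = "WARNING"
    else:
        state = "OK"
    return state, "affected: [%d], total: [%d]" % (affected, total)

print(aggregate_state(2, 10))  # ('WARNING', 'affected: [2], total: [10]')
print(aggregate_state(4, 10))  # ('CRITICAL', 'affected: [4], total: [10]')
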
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml
new file mode 100644
index 0000000..da12706
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-env.xml
@@ -0,0 +1,279 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- Inherited from HBase in HDP 2.0.6. -->
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <display-name>HBase Log Dir Prefix</display-name>
+    <description>Log Directories for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <display-name>HBase PID Dir</display-name>
+    <description>Pid Directory for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>4096</value>
+    <description>Maximum amount of memory each HBase RegionServer can use.</description>
+    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>6554</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>4000</value>
+    <description>
+Sets the upper bound on the HBase RegionServers' young generation size.
+This cap applies when the young generation size (-Xmn) calculated from the max heap size (hbase_regionserver_heapsize)
+and the -Xmn ratio (hbase_regionserver_xmn_ratio) would exceed this value.
+    </description>
+    <display-name>RegionServers maximum value for -Xmn</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <display-name>RegionServers -Xmn in -Xmx ratio</display-name>
+    <description>Percentage of the max heap size (-Xmx) that is used for the young generation heap (-Xmn).</description>
+    <value-attributes>
+      <type>float</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>4096</value>
+    <description>Maximum amount of memory each HBase Master can use.</description>
+    <display-name>HBase Master Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <display-name>HBase User</display-name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for HBASE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for HBASE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_java_io_tmpdir</name>
+    <value>/tmp</value>
+    <description>Used in hbase-env.sh as HBASE_OPTS=-Djava.io.tmpdir=java_io_tmpdir</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_principal_name</name>
+    <description>HBase principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_user_keytab</name>
+    <description>HBase keytab path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_shutdown_timeout</name>
+    <value>30</value>
+    <display-name>HBase RegionServer shutdown timeout</display-name>
+    <description>
+After this number of seconds waiting for a graceful stop of the HBase RegionServer, it will be forced to exit with SIGKILL.
+The timeout is introduced because of a known bug: from time to time the HBase RegionServer hangs forever on stop if NameNode safemode is on.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hbase-env template</display-name>
+    <description>This is the jinja template for the hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if java_version &lt; 8 %}
+JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m"
+{% endif %}
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export PHOENIX_QUERYSERVER_OPTS="$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS"
+{% endif %}
+
+# HBase off-heap MaxDirectMemorySize
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+</value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.2 -->
+  <property>
+    <name>hbase_max_direct_memory_size</name>
+    <value/>
+    <display-name>HBase off-heap MaxDirectMemorySize</display-name>
+    <description>If not empty, adds '-XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m' to HBASE_REGIONSERVER_OPTS.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix_sql_enabled</name>
+    <value>false</value>
+    <description>Enable Phoenix SQL</description>
+    <display-name>Enable Phoenix</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
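
hbase_regionserver_xmn_ratio and hbase_regionserver_xmn_max feed the {{regionserver_xmn_size}} value used in the env template above: the ratio of the RegionServer heap, clamped at the configured maximum. A sketch of that clamping (the actual computation lives in Ambari's params/stack-advisor code, and the function name here is illustrative):

def regionserver_xmn_size(heapsize_mb, xmn_ratio=0.2, xmn_max_mb=4000):
    # -Xmn = heap * ratio, but never above hbase_regionserver_xmn_max.
    return int(min(heapsize_mb * xmn_ratio, xmn_max_mb))

print(regionserver_xmn_size(4096))   # 819  (the ratio applies)
print(regionserver_xmn_size(32768))  # 4000 (clamped at hbase_regionserver_xmn_max)
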
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..10e2237
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-log4j.xml
@@ -0,0 +1,188 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+ <property>
+    <name>hbase_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of a backup file before the log is rotated</description>
+    <display-name>HBase Log: backup file size</display-name>
+    <value-attributes>
+        <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+      <name>hbase_log_maxbackupindex</name>
+      <value>20</value>
+      <description>The number of backup files</description>
+      <display-name>HBase Log: # of backup files</display-name>
+      <value-attributes>
+        <type>int</type>
+        <minimum>0</minimum>
+      </value-attributes>
+      <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+    <name>hbase_security_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of a security backup file before the log is rotated</description>
+    <display-name>HBase Security Log: backup file size</display-name>
+    <value-attributes>
+        <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+      <name>hbase_security_log_maxbackupindex</name>
+      <value>20</value>
+      <description>The number of security backup files</description>
+      <display-name>HBase Security Log: # of backup files</display-name>
+      <value-attributes>
+        <type>int</type>
+        <minimum>0</minimum>
+      </value-attributes>
+      <on-ambari-upgrade add="false"/>
+ </property>
+  <property>
+    <name>content</name>
+    <display-name>hbase-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB
+hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB
+hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=INFO
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
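The {{hbase_log_maxfilesize}}-style placeholders inside the content template above are expanded from the sibling properties when the agent renders log4j.properties. A stand-in for that substitution (Ambari's actual renderer is Jinja2-based; this regex version only demonstrates the effect):

```python
# Stand-in for the template expansion applied to the log4j 'content' block;
# Ambari's real renderer is Jinja2-based, this regex only shows the effect.
import re

template = (
    "hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB\n"
    "hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}\n"
)

# Values come from the hbase_log_* properties defined earlier in this file.
params = {'hbase_log_maxfilesize': '256', 'hbase_log_maxbackupindex': '20'}

rendered = re.sub(r'\{\{(\w+)\}\}', lambda m: params[m.group(1)], template)
print(rendered)  # hbase.log.maxfilesize=256MB / hbase.log.maxbackupindex=20
```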
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml
new file mode 100644
index 0000000..8500ee0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
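Each ACL description above uses the same Hadoop service-level authorization format: a comma-separated user list and a comma-separated group list separated by a single blank, with "*" meaning everyone. A small sketch of how such a string decomposes (the helper name is ours, not Hadoop's):

```python
# Sketch of the ACL grammar documented above; parse_service_acl is an
# illustrative helper, not part of Hadoop or HBase.
def parse_service_acl(acl):
    acl = acl.strip()
    if acl == '*':
        return None  # None means "all users allowed"
    users_part, _, groups_part = acl.partition(' ')
    users = [u for u in users_part.split(',') if u]
    groups = [g for g in groups_part.split(',') if g]
    return users, groups

print(parse_service_acl('*'))                      # None -> everyone
print(parse_service_acl('alice,bob users,wheel'))  # (['alice', 'bob'], ['users', 'wheel'])
```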
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml
new file mode 100644
index 0000000..0df616e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/hbase-site.xml
@@ -0,0 +1,774 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- Inherited from HBase in HDP 2.0.6 -->
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or all data will be lost
+    on machine restart.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>16000</value>
+    <display-name>HBase Master Port</display-name>
+    <description>The port the HBase Master should bind to.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/tmp/hbase-${user.name}</value>
+    <display-name>HBase tmp directory</display-name>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>16010</value>
+    <description>The port for the HBase Master web UI.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>16030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>30</value>
+    <description>
+      Count of RPC Listener instances spun up on RegionServers.
+      Same property is used by the Master for count of master handlers.
+    </description>
+    <display-name>Number of Handlers per RegionServer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>240</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
+      time-based automatic major compactions. User-requested and size-based major compactions will
+      still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
+      compaction to start at a somewhat-random time during a given window of time. The default value
+      is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
+      environment, you can configure them to run at off-peak times for your deployment, or disable
+      time-based major compactions by setting this parameter to 0, and run major compactions in a
+      cron job or by another external mechanism.</description>
+    <display-name>Major Compaction Interval</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2592000000</maximum>
+      <unit>milliseconds</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+      Block updates if memstore has hbase.hregion.memstore.block.multiplier
+      times hbase.hregion.memstore.flush.size bytes.  Useful for preventing
+      runaway memstore during spikes in update traffic.  Without an
+      upper-bound, memstore fills such that when it flushes the
+      resultant flush files take a long time to compact or split, or
+      worse, we OOME.
+    </description>
+    <display-name>HBase Region Block Multiplier</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+        <entry>
+          <value>8</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      The size of an individual memstore. Each column family within each region is allocated its own memstore.
+    </description>
+    <display-name>Memstore Flush Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>33554432</minimum>
+      <maximum>268435456</maximum>
+      <increment-step>1048576</increment-step>
+      <unit>B</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this
+      value, the region is split in two.
+    </description>
+    <display-name>Maximum Region File Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>107374182400</maximum>
+      <unit>B</unit>
+      <increment-step>1073741824</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+    <display-name>Number of Fetched Rows when Scanning from Disk</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>100</minimum>
+      <maximum>10000</maximum>
+      <increment-step>100</increment-step>
+      <unit>rows</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>90000</value>
+    <description>ZooKeeper session timeout in milliseconds.
+      It is used in two different ways.
+      First, this value is used in the ZK client that HBase uses to connect to the ensemble.
+      It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
+      For example, if a HBase region server connects to a ZK ensemble that's also managed by HBase, then the
+      session timeout will be the one specified by this configuration. But, a region server that connects
+      to an ensemble managed with a different configuration will be subject to that ensemble's maxSessionTimeout. So,
+      even though HBase might propose using 90 seconds, the ensemble can have a max timeout lower than this and
+      it will take precedence.
+    </description>
+    <display-name>Zookeeper Session Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>1048576</value>
+    <description>
+      Specifies the combined maximum allowed size of a KeyValue
+      instance. This is to set an upper boundary for a single entry saved in a
+      storage file. Since entries cannot be split, this helps avoid a region
+      becoming unsplittable because its data is too large. It seems wise
+      to set this to a fraction of the maximum region size. Setting it to zero
+      or less disables the check.
+    </description>
+    <display-name>Maximum Record Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1048576</minimum>
+      <maximum>31457280</maximum>
+      <unit>B</unit>
+      <increment-step>262144</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store
+      (one StoreFile is written per flush of MemStore), a minor compaction
+      runs to rewrite those StoreFiles into one. Larger values delay
+      compaction, but when a compaction does run it takes longer to
+      complete. For most cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Store Files before Minor Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>3</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <display-name>hstore blocking storefiles</display-name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>Percentage of RegionServer memory to allocate to read buffers.</description>
+    <display-name>% of RegionServer Allocated to Read Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated) who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Select Simple or Kerberos authentication. Note: Kerberos must be set up before the Kerberos option will take effect.
+    </description>
+    <display-name>Enable Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <label>Simple</label>
+          <value>simple</value>
+        </entry>
+        <entry>
+          <label>Kerberos</label>
+          <value>kerberos</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Set the authorization method.</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Native</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+      default on all tables. For any override coprocessor method, these classes
+      will be called in order. After implementing your own Coprocessor, just put
+      it in HBase's classpath and add the fully qualified class name here.
+      A coprocessor can also be loaded on demand per table by setting it on the table's HTableDescriptor.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authentication</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value/>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma-separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>35</value>
+    <description>Maximum retries.  Used as maximum for all retryable
+    operations such as the getting of a cell's value, starting a row update,
+    etc.  Retry interval is a rough function based on hbase.client.pause.  At
+    first we retry at this interval but then with backoff, we pretty quickly reach
+    retrying every ten seconds.  See HConstants#RETRY_BACKOFF for how the backoff
+    ramps up.  Change this setting and hbase.client.pause to suit your workload.</description>
+    <display-name>Maximum Client Retries</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>50</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>90000</value>
+    <description>
+      This is for the RPC layer to define how long HBase client applications
+      wait for a remote call to time out. It uses pings to check connections
+      but will eventually throw a TimeoutException.
+    </description>
+    <display-name>HBase RPC Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>60000</value>
+    <description>Number of milliseconds after which a Phoenix query will timeout on the client.</description>
+    <display-name>Phoenix Query Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>30000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.protection</name>
+    <value>authentication</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.2 -->
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>/apps/hbase/staging</value>
+    <description>A staging directory in the default file system (HDFS)
+      for bulk loading.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+      a given amount of time on either side of hbase.hregion.majorcompaction. The smaller the number,
+      the closer the compactions will happen to the hbase.hregion.majorcompaction
+      interval.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.ioengine</name>
+    <value/>
+    <description>Where to store the contents of the bucketcache. One of: onheap,
+      offheap, or file. If a file, set it to file:PATH_TO_FILE.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.size</name>
+    <value/>
+    <description>The size of the buckets for the bucketcache if you only use a single size.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.percentage.in.combinedcache</name>
+    <value/>
+    <description>Value to be set between 0.0 and 1.0</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.wal.codec</name>
+    <display-name>RegionServer WAL Codec</display-name>
+    <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.region.server.rpc.scheduler.factory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.rpc.controllerfactory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>phoenix.functions.allowUserDefinedFunctions</name>
+    <value> </value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.regionserver.classes</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>10</value>
+    <description>The maximum number of StoreFiles which will be selected for a single minor
+      compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+      hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+      complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+      cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Files for Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>8</value>
+        </entry>
+        <entry>
+          <value>9</value>
+        </entry>
+        <entry>
+          <value>10</value>
+        </entry>
+        <entry>
+          <value>11</value>
+        </entry>
+        <entry>
+          <value>12</value>
+        </entry>
+        <entry>
+          <value>13</value>
+        </entry>
+        <entry>
+          <value>14</value>
+        </entry>
+        <entry>
+          <value>15</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.size</name>
+    <value>0.4</value>
+    <description>Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool.
+      If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark
+      (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.3 -->
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>16020</value>
+    <description>The port the HBase RegionServer binds to.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.5 -->
+  <property>
+    <name>hbase.master.ui.readonly</name>
+    <value>false</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.recovery.retry</name>
+    <value>6</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>hbase.regionserver.executor.openregion.threads</name>
+    <value>20</value>
+    <description>The number of threads the region server uses to open regions
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.namespace.init.timeout</name>
+    <value>2400000</value>
+    <description>The number of milliseconds the master waits for the hbase:namespace table to be initialized
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.timeout</name>
+    <value>30000</value>
+    <description>The number of milliseconds the master waits for region servers to report in
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
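Many properties above carry value-attributes bounds (for example hbase.regionserver.handler.count: int, minimum 5, maximum 240) that Ambari's UI enforces. A self-contained sketch of that validation semantics against one property element copied from hbase-site above:

```python
# Sketch: enforcing <value-attributes> minimum/maximum for an int property,
# as Ambari's UI does. The XML snippet is copied from hbase-site above.
import xml.etree.ElementTree as ET

snippet = """
<property>
  <name>hbase.regionserver.handler.count</name>
  <value>30</value>
  <value-attributes><type>int</type><minimum>5</minimum><maximum>240</maximum></value-attributes>
</property>
"""

prop = ET.fromstring(snippet)
value = int(prop.findtext('value'))
lo = int(prop.findtext('value-attributes/minimum'))
hi = int(prop.findtext('value-attributes/maximum'))
assert lo <= value <= hi, 'value out of declared bounds'
print('%s=%d ok (allowed %d..%d)' % (prop.findtext('name'), value, lo, hi))
```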
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml
new file mode 100644
index 0000000..9d4e7d0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-audit.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/hdfs/spool</value>
+    <description>Local spool directory for HDFS audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr ZooKeeper connect string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/solr/spool</value>
+    <description>Local spool directory for Solr audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>true</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>ranger.plugin.hbase.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Name of the cluster on which the Ranger HBase plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
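The xasecure.audit.destination.* flags above select the active audit sinks; values reach the plugin as strings in the rendered config. A sketch of the selection logic under that assumption (the helper is illustrative, not a Ranger API):

```python
# Sketch: which audit sinks the ranger-hbase-audit defaults above enable.
# enabled_audit_sinks is an illustrative helper, not a Ranger API.
def enabled_audit_sinks(conf):
    if conf.get('xasecure.audit.is.enabled') != 'true':
        return []
    sinks = []
    if conf.get('xasecure.audit.destination.hdfs') == 'true':
        sinks.append(('hdfs', conf['xasecure.audit.destination.hdfs.dir']))
    if conf.get('xasecure.audit.destination.solr') == 'true':
        sinks.append(('solr', conf.get('xasecure.audit.destination.solr.urls', '')))
    return sinks

defaults = {
    'xasecure.audit.is.enabled': 'true',
    'xasecure.audit.destination.hdfs': 'true',
    'xasecure.audit.destination.hdfs.dir': 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit',
    'xasecure.audit.destination.solr': 'false',
}
print(enabled_audit_sinks(defaults))  # [('hdfs', 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit')]
```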
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..7d7f1fa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HBASE</display-name>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger_user</name>
+      </property>
+    </depends-on>
+    <description>This user must be a system user and must also exist in the Ranger Admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hbase-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HBASE</display-name>
+    <description>Enable the Ranger HBase plugin?</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation in Ranger Admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation in Ranger Admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- Inherited from HBase in HDP 2.6 -->
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Default Ranger admin username; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger admin password; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Default Ranger Ambari admin username; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger Ambari admin password; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
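Note the mismatch above: ranger-hbase-plugin-enabled defaults to "No" even though its value-attributes type is boolean, so consuming code has to normalize the string. A sketch of a tolerant normalization (Ambari stack code typically compares case-insensitively; the helper here is illustrative):

```python
# Sketch: ranger-hbase-plugin-enabled defaults to "No" despite the boolean
# type attribute, so the string must be normalized before use.
def plugin_enabled(raw):
    return str(raw).strip().lower() in ('yes', 'true')

print(plugin_enabled('No'))   # False (the default declared above)
print(plugin_enabled('Yes'))  # True
```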
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml
new file mode 100644
index 0000000..c761b26
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the truststore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml
new file mode 100644
index 0000000..4a0909a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/configuration/ranger-hbase-security.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.hbase.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing HBase policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.ssl.config.file</name>
+    <value>/etc/hbase/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>Interval in milliseconds at which to poll Ranger Admin for policy changes</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.hbase.update.xapolicies.on.grant.revoke</name>
+    <value>true</value>
+    <display-name>Should HBase GRANT/REVOKE update XA policies</display-name>
+    <description>Should HBase plugin update Ranger policies for updates to permissions done using GRANT/REVOKE?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
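The last few properties describe the plugin's refresh behaviour: poll Ranger Admin at policy.pollIntervalMs and keep the last good policy set under policy.cache.dir so enforcement survives an unreachable admin. A minimal sketch of that poll-and-cache loop (the fetch callable and file layout are illustrative stand-ins, not Ranger's actual client):

```python
# Sketch of the poll-and-cache behaviour the properties above configure;
# fetch_policies and the cache file name are illustrative stand-ins.
import json, os, time

def poll_policies(fetch_policies, cache_dir, interval_ms, iterations=1):
    os.makedirs(cache_dir, exist_ok=True)
    cache_file = os.path.join(cache_dir, 'hbase_policies.json')
    policies = []
    for _ in range(iterations):
        try:
            policies = fetch_policies()          # REST call to policy.rest.url
            with open(cache_file, 'w') as f:
                json.dump(policies, f)           # refresh cache on success
        except Exception:
            if os.path.exists(cache_file):
                with open(cache_file) as f:
                    policies = json.load(f)      # fall back to cached policies
        time.sleep(interval_ms / 1000.0)
    return policies

demo = poll_policies(lambda: [{'resource': 'table:t1', 'access': 'read'}],
                     '/tmp/policycache', interval_ms=0)
print(demo)
```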
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json
new file mode 100644
index 0000000..011921b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/kerberos.json
@@ -0,0 +1,160 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}${principal_suffix}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+            "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+            "hbase.coprocessor.regionserver.classes": "{{hbase_coprocessor_regionserver_classes}}",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+            "hbase.master.ui.readonly": "true"
+          }
+        },
+        {
+          "ranger-hbase-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            },
+            {
+              "name" : "ranger_hbase_audit",
+              "reference": "/HBASE/HBASE_MASTER/hbase_master_hbase",
+              "principal": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PHOENIX_QUERY_SERVER",
+          "identities": [
+            {
+              "name": "phoenix_spnego",
+              "reference": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/phoenix.queryserver.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/phoenix.queryserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
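The descriptor above leans on two substitution mechanisms: ${config-type/property} references (plus built-ins such as ${realm} and ${principal_suffix}) resolved against the cluster's configuration, and the _HOST marker in service principals, expanded per host. A small sketch of how such references could be resolved, assuming configs are available as nested dicts (resolve is a hypothetical helper, not Ambari's resolver):

    import re

    CONFIGS = {
        "hbase-env": {"hbase_user": "hbase"},
        "cluster-env": {"user_group": "hadoop"},
    }

    def resolve(value, configs, realm="EXAMPLE.COM", host="rs1.example.com"):
        def lookup(match):
            ref = match.group(1)
            if ref == "realm":
                return realm
            if "/" in ref:
                config_type, prop = ref.split("/", 1)
                return configs.get(config_type, {}).get(prop, match.group(0))
            return match.group(0)  # leave unknown built-ins untouched
        value = re.sub(r"\$\{([^}]+)\}", lookup, value)
        return value.replace("_HOST", host)  # per-host principal expansion

    print(resolve("${hbase-env/hbase_user}@${realm}", CONFIGS))  # hbase@EXAMPLE.COM
    print(resolve("hbase/_HOST@${realm}", CONFIGS))              # hbase/rs1.example.com@EXAMPLE.COM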
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml
new file mode 100644
index 0000000..ac57693
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metainfo.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>2.0.0.3.0</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <timelineAppid>HBASE</timelineAppid>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hbase_master</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <timelineAppid>HBASE</timelineAppid>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <bulkCommands>
+            <displayName>RegionServers</displayName>
+            <!-- Used by decommission and recommission -->
+            <masterComponent>HBASE_MASTER</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>hbase_regionserver</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>PHOENIX_QUERY_SERVER</name>
+          <displayName>Phoenix Query Server</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/phoenix_queryserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hbase_phoenix_server</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type> <!-- hbase puts core-site in its folder -->
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+        <config-type>ranger-hbase-plugin-properties</config-type>
+        <config-type>ranger-hbase-audit</config-type>
+        <config-type>ranger-hbase-policymgr-ssl</config-type>
+        <config-type>ranger-hbase-security</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hbase_${stack_version}</name>
+            </package>
+            <package>
+              <name>phoenix_${stack_version}</name>
+              <condition>should_install_phoenix</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>hbase-${stack_version}</name>
+            </package>
+            <package>
+              <name>phoenix-${stack_version}</name>
+              <condition>should_install_phoenix</condition>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
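The osSpecifics block above encodes the stack packaging convention: rpm-based families use underscore-separated package names (hbase_${stack_version}) while deb-based families use hyphenated ones (hbase-${stack_version}), with the ${stack_version} token expanded at install time. A rough Python sketch of that selection logic, with illustrative family names and version-token handling (not Ambari's actual code):

    # Package lists keyed by OS family, mirroring the two osSpecific entries.
    OS_SPECIFICS = [
        {"families": {"redhat7", "amazon2015", "redhat6", "suse11", "suse12"},
         "packages": ["hbase_${stack_version}", "phoenix_${stack_version}"]},
        {"families": {"debian7", "ubuntu12", "ubuntu14", "ubuntu16"},
         "packages": ["hbase-${stack_version}", "phoenix-${stack_version}"]},
    ]

    def packages_for(os_family, stack_version):
        """Pick the package list for the agent's OS family and expand the token."""
        for spec in OS_SPECIFICS:
            if os_family in spec["families"]:
                return [p.replace("${stack_version}", stack_version)
                        for p in spec["packages"]]
        return []

    print(packages_for("redhat7", "3_0_0_0_*"))
    # -> ['hbase_3_0_0_0_*', 'phoenix_3_0_0_0_*']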
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json
new file mode 100644
index 0000000..f94f510
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/metrics.json
@@ -0,0 +1,4733 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "HBASE_MASTER": {
+    "Component": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/regionServerReport.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "Hadoop:service=HBase,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Revision": {
+              "metric": "hadoop:service=HBase,name=Info.revision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/AverageLoad": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ServerName": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.serverName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ZookeeperQuorum": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.zookeeperQuorum",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsDate": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsDate",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUrl": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUrl",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/revision": {
+              "metric": "hadoop:service=HBase,name=Info.revision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterActiveTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUser": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUser",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/version": {
+              "metric": "hadoop:service=HBase,name=Info.version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/IsActiveMaster": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.isActiveMaster",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterStartTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSize_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/MasterActiveTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_avg_time": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeAvgTime": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/cluster_requests": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/Coprocessors": {
+              "metric": "hadoop:service=Master,name=Master.Coprocessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionsInTransition": {
+              "metric": "hadoop:service=Master,name=Master.RegionsInTransition",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/master/AssignmentManger/ritCount": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=AssignmentManger.ritCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsVersion": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionServers": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.numRegionServers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ClusterId": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.clusterId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicationCallQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMinTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/RegionsInTransition": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.ritCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/DeadRegionServers": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.numDeadRegionServers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/AverageLoad": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/MasterStartTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/priorityCallQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.callQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsRevision": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsRevision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/url": {
+              "metric": "hadoop:service=HBase,name=Info.url",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSize_avg_time": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/date": {
+              "metric": "hadoop:service=HBase,name=Info.date",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/user": {
+              "metric": "java.lang:type=Runtime.SystemProperties.user.name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeMinTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeAvgTime": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Version": {
+              "metric": "hadoop:service=HBase,name=Info.version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/regionServerReport.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "Hadoop:service=HBase,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/AverageLoad": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.averageLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ServerName": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.serverName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ZookeeperQuorum": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.zookeeperQuorum",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsDate": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsDate",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUrl": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUrl",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/revision": {
+              "metric": "hadoop:service=HBase,name=Info.revision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterActiveTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/master/AssignmentManger/ritCount": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=AssignmentManger.ritCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsUser": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsUser",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/version": {
+              "metric": "hadoop:service=HBase,name=Info.version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMaxTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/IsActiveMaster": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.isActiveMaster",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/MasterStartTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSize_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_avg_time": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeAvgTime": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/multiAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/cluster_requests": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.clusterRequests",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/Coprocessors": {
+              "metric": "hadoop:service=Master,name=Master.Coprocessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionsInTransition": {
+              "metric": "hadoop:service=Master,name=Master.RegionsInTransition",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsVersion": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getLastFlushTimeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatusAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/RegionServers": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.numRegionServers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/ClusterId": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.tag.clusterId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatus.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeNumOps": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unlockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcSlowResponseMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitchMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicationCallQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalErrorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offline.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTimeMinTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/replicateLogEntriesMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitTime_num_ops": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReport.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/execCoprocessorNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rollHLogWriterAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTableNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumnAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/DeadRegionServers": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.numDeadRegionServers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/appendMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.appendMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/priorityCallQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/bulkLoadHFilesNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.callQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassignAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getAlterStatusNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyColumn.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getCompactionStateNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptorsAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMaster.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/hdfsRevision": {
+              "metric": "hadoop:service=HBase,name=Info.hdfsRevision",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/url": {
+              "metric": "hadoop:service=HBase,name=Info.url",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/nextMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balance.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHServerInfoNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getStoreFileListNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumn.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitchMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunningMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/closeRegionAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/disableTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assign.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/moveNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.moveNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementColumnValueMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSize_avg_time": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/load/AverageLoad": {
+              "metric": "hadoop:service=Master,name=Master.AverageLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndDeleteAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/date": {
+              "metric": "hadoop:service=HBase,name=Info.date",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/flushRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getOnlineRegionsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/user": {
+              "metric": "java.lang:type=Runtime.SystemProperties.user.name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getClosestRowBeforeAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/offlineNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/incrementMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/deleteTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/checkAndPutAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openScannerAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/assignMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.assignMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/compactRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/openRegionMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/addColumnMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/existsMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeMinTime": {
+              "metric": "Hadoop:service=HBase,name=Master,sub=Server.HlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdown.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTableAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/lockRowNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getProtocolSignatureMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/modifyTable.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/splitRegionMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/mutateRowMinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/master/splitSizeAvgTime": {
+              "metric": "hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerReportAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/putAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.putAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getNumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/shutdownMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/regionServerStartupMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/createTableMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/isMasterRunning.aboveOneSec.NumOps": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/enableTable.aboveOneSec.AvgTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/getRegionInfoMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/unassign.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/stopMasterMaxTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/move.aboveOneSec.MinTime": {
+              "metric": "hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Can accept either a hostname, to drain all region servers ' +
+                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return an array of server names, where each server name is the
+# comma-delimited hostname,port,startcode triplet
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
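+      # connect lazily: create the HBaseAdmin only when a name must be resolved against the cluster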
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and silence the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to a Ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh
new file mode 100644
index 0000000..cde19e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
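+# hbase shell input: disable and drop the smoke-test table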
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+exit
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..8b085e8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
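+# dump the smoke-test table, then verify the expected value is present and exactly one row was returned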
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+tr -d '\n|\t| ' < /tmp/hbase_chk_verify | grep -q $data
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py
new file mode 100644
index 0000000..f98b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g. '1000m')
+  @param xmn_percent: float (e.g. 0.2)
+  @param xmn_max: integer (e.g. 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
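+  # round Xmn down to a multiple of 8, keeping the JVM generation size aligned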
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
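+# Normalize a memory size to "<value><unit>", defaulting the unit to megabytes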
+def ensure_unit_for_memory(memory_size):
+  memory_size_values = re.findall('\d+', str(memory_size))
+  memory_size_unit = re.findall('\D+', str(memory_size))
+
+  if len(memory_size_values) > 0:
+    unit = 'm'
+    if len(memory_size_unit) > 0:
+      unit = memory_size_unit[0]
+    if unit not in ['b', 'k', 'm', 'g', 't', 'p']:
+      raise Exception("Memory size unit error. %s - wrong unit" % unit)
+    return "%s%s" % (memory_size_values[0], unit)
+  else:
+    raise Exception('Memory size can not be calculated')
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py
new file mode 100644
index 0000000..fced4fc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.source import Template, InlineTemplate
+from resource_management.core.resources import Package
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
+
+  if params.service_map.has_key(name):
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hbase_user,
+                  password = Script.get_password(params.hbase_user))
+
+# name is 'master' or 'regionserver' or 'queryserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
+  import params
+
+  Directory( params.etc_prefix_dir,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+   
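+  # world-writable temp directory backing java.io.tmpdir for the HBase JVMs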
+  Directory(params.java_io_tmpdir,
+      create_parents = True,
+      mode=0777
+  )
+
+  # If a file location is specified in ioengine parameter,
+  # ensure that directory exists. Otherwise create the
+  # directory with permissions assigned to hbase:hadoop.
+  ioengine_input = params.ioengine_param
+  if ioengine_input != None:
+    if ioengine_input.startswith("file:/"):
+      ioengine_fullpath = ioengine_input[5:]
+      ioengine_dir = os.path.dirname(ioengine_fullpath)
+      Directory(ioengine_dir,
+          owner = params.hbase_user,
+          group = params.user_group,
+          create_parents = True,
+          mode = 0755
+      )
+  
+  parent_dir = os.path.dirname(params.tmp_dir)
+  # In case there are several placeholders in the path
+  while ("${" in parent_dir):
+    parent_dir = os.path.dirname(parent_dir)
+  if parent_dir != os.path.abspath(os.sep) :
+    Directory (parent_dir,
+          create_parents = True,
+          cd_access="a",
+    )
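+    # 1777 sets the sticky bit so users can only delete their own files, as with /tmp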
+    Execute(("chmod", "1777", parent_dir), sudo=True)
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
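+  # stacks that require it for Phoenix get core-site/hdfs-site copied into the HBase conf dir; otherwise stale copies are deleted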
+  if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED, params.version_for_stack_feature_checks):
+    XmlConfig( "core-site.xml",
+               conf_dir = params.hbase_conf_dir,
+               configurations = params.config['configurations']['core-site'],
+               configuration_attributes=params.config['configuration_attributes']['core-site'],
+               owner = params.hbase_user,
+               group = params.user_group
+    )
+    if 'hdfs-site' in params.config['configurations']:
+      XmlConfig( "hdfs-site.xml",
+              conf_dir = params.hbase_conf_dir,
+              configurations = params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner = params.hbase_user,
+              group = params.user_group
+      )
+  else:
+    File(format("{params.hbase_conf_dir}/hdfs-site.xml"),
+         action="delete"
+    )
+    File(format("{params.hbase_conf_dir}/core-site.xml"),
+         action="delete"
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template),
+       group = params.user_group,
+  )
+  
+  # On some OSes this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+  
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=InlineTemplate(params.log4j_props)
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name == "master":
+    params.HdfsResource(params.hbase_hdfs_root_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user
+    )
+    params.HdfsResource(params.hbase_staging_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    if params.create_hbase_home_directory:
+      params.HdfsResource(params.hbase_home_directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user,
+                          mode=0755
+      )
+    params.HdfsResource(None, action="execute")
+
+  if params.phoenix_enabled:
+    Package(params.phoenix_package,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py
new file mode 100644
index 0000000..c8128ab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HbaseClient(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
+  def get_component_name(self):
+    return "hbase-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("hbase-client", params.version)
+
+      # phoenix may not always be deployed
+      try:
+        stack_select.select("phoenix-client", params.version)
+      except Exception as e:
+        print "Ignoring error due to missing phoenix-client"
+        print str(e)
+
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..7358674
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.libraries.functions.format import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd_master
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
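+  # drain-only removes the hosts from the draining list; the full path marks them draining and unloads their regions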
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d2c8089
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_component_name(self):
+    return "hbase-master"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
+    hbase_service('master', action = 'start')
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hbase_service('master', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.hbase_master_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.hbase_master_pid_file]
+
+if __name__ == "__main__":
+  HbaseMaster().execute()
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..226e7fd5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management.core import shell
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='regionserver')
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_component_name(self):
+    return "hbase-regionserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-regionserver")
+
+    hbase_service('regionserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.regionserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.regionserver_pid_file]
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py
new file mode 100644
index 0000000..1dbd560
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.shell import as_sudo
+from resource_management.core.resources.system import Execute, File
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    pid_expression = as_sudo(["cat", pid_file])
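+    # the no-op test passes only if the pid file exists and the process it names is still alive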
+    no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      try:
+        Execute ( daemon_cmd,
+          not_if = no_op_test,
+          user = params.hbase_user
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      try:
+        Execute ( daemon_cmd,
+          user = params.hbase_user,
+          only_if = no_op_test,
+          # BUGFIX: the HBase RegionServer sometimes hangs when the NameNode is in safe mode
+          timeout = params.hbase_regionserver_shutdown_timeout,
+          on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"),
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+      
+      File(pid_file,
+           action = "delete",
+      )
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py
new file mode 100644
index 0000000..e5bb781
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def take_snapshot(self, env):
+    import params
+
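+    # prefix with kinit so the hbase shell call is authenticated on Kerberized clusters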
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+  def restore_snapshot(self, env):
+    import params
+    print "TODO AMBARI-12698"
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..e0607f3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..b7e2b89
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,426 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import status_params
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same function set.
+
+from functions import calc_xmn_from_xms, ensure_unit_for_memory
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import string_set_intersection
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = status_params.stack_name
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+version = default("/commandParams/version", None)
+component_directory = status_params.component_directory
+etc_prefix_dir = "/etc/hbase"
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_root = status_params.stack_root
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# hadoop default parameters
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_max_direct_memory_size = None
+
+# hadoop parameters for stacks supporting rolling_upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  daemon_script = format('{stack_root}/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('{stack_root}/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('{stack_root}/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('{stack_root}/current/hbase-client/bin/hbase')
+
+  hbase_max_direct_memory_size  = default('configurations/hbase-env/hbase_max_direct_memory_size', None)
+
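+  # the component-directory paths below override the generic hbase-client paths set above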
+  daemon_script=format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh")
+  region_mover = format("{stack_root}/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("{stack_root}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase")
+
+
+hbase_conf_dir = status_params.hbase_conf_dir
+limits_conf_dir = status_params.limits_conf_dir
+
+hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000")
+hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
+
+# no symlink for phoenix-server at this point
+phx_daemon_script = format('{stack_root}/current/phoenix-server/bin/queryserver.py')
+
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# 32-bit JDK is not supported.
+java64_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir", "/tmp")
+master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize'])
+
+regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
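+# Illustrative arithmetic (hypothetical values, assuming calc_xmn_from_xms floors
+# heap * ratio and caps the result at the max): a 4096m regionserver heap with a
+# 0.2 ratio yields 819m, which an xmn_max of 512 then caps at 512m.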
+
+hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)
+
+phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
+phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
+has_phoenix = len(phoenix_hosts) > 0
+
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
+  phoenix_package = format("phoenix_{underscored_version}_*")
+elif OSCheck.is_ubuntu_family():
+  phoenix_package = format("phoenix-{dashed_version}-.*")
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+local_dir = config['configurations']['hbase-site']['hbase.local.dir']
+ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None)
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")
+
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+has_metric_collector = len(ams_collector_hosts) > 0
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
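+  # Example with hypothetical values: a webapp address of "c6401.ambari.apache.org:6188"
+  # splits to port "6188", and an HTTPS_ONLY policy selects the "https" protocol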
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# if hbase is selected, hbase_rs_hosts should not be empty, but default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as slaves
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+service_check_data = get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  _queryserver_jaas_princ = config['configurations']['hbase-site']['phoenix.queryserver.kerberos.principal']
+  if not is_empty(_queryserver_jaas_princ):
+    queryserver_jaas_princ = _queryserver_jaas_princ.replace('_HOST',_hostname_lowercase)
+
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+  kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
+  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
+else:
+  kinit_cmd = ""
+  kinit_cmd_master = ""
+  master_security_config = ""
+
+# log4j.properties: HBase log4j settings
+hbase_log_maxfilesize = default('/configurations/hbase-log4j/hbase_log_maxfilesize', 256)
+hbase_log_maxbackupindex = default('/configurations/hbase-log4j/hbase_log_maxbackupindex', 20)
+hbase_security_log_maxfilesize = default('/configurations/hbase-log4j/hbase_security_log_maxfilesize', 256)
+hbase_security_log_maxbackupindex = default('/configurations/hbase-log4j/hbase_security_log_maxbackupindex', 20)
+
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
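+# With the partial applied, call sites pass only the resource-specific arguments;
+# a minimal sketch (illustrative paths and parameters):
+#   HdfsResource(hbase_hdfs_root_dir, type="directory", action="create_on_execute", owner=hbase_user)
+#   HdfsResource(None, action="execute")  # flush the queued HDFS operations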
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+# ranger hbase plugin section start
+
+# to get db connector jar
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = len(ranger_admin_hosts) > 0
+
+# the ranger xml_configurations_supported flag is derived from a stack feature instead of depending on the xml_configurations_supported property introduced in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = enable_ranger_hbase.lower() == 'yes'
+
+# ranger hbase properties
+if enable_ranger_hbase:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
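+    # e.g. a hypothetical "http://ranger.example.com:6080/" is normalized to
+    # "http://ranger.example.com:6080" so later URL joins do not double the slash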
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger hbase service/repository name
+  repo_name = str(config['clusterName']) + '_hbase'
+  repo_name_value = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+  ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
+  policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin:
+    external_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}")
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+    driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}")
+    previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}")
+    sql_connector_jar = ''
+
+  if security_enabled:
+    master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal']
+
+  hbase_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'hadoop.security.authentication': hadoop_security_authentication,
+    'hbase.security.authentication': hbase_security_authentication,
+    'hbase.zookeeper.property.clientPort': hbase_zookeeper_property_clientPort,
+    'hbase.zookeeper.quorum': hbase_zookeeper_quorum,
+    'zookeeper.znode.parent': zookeeper_znode_parent,
+    'commonNameForCertificate': common_name_for_certificate,
+    'hbase.master.kerberos.principal': master_principal if security_enabled else ''
+  }
+
+  hbase_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hbase_ranger_plugin_config),
+    'description': 'hbase repo',
+    'name': repo_name,
+    'repositoryType': 'hbase',
+    'assetType': '2'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    hbase_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['policy.grantrevoke.auth.users'] = hbase_user
+
+  if stack_supports_ranger_kerberos:
+    hbase_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hbase_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hbase_ranger_plugin_config,
+      'description': 'hbase repo',
+      'name': repo_name,
+      'type': 'hbase'
+    }
+
+  ranger_hbase_principal = None
+  ranger_hbase_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower():
+    ranger_hbase_principal = master_jaas_princ
+    ranger_hbase_keytab = master_keytab_path
+  elif stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
+    ranger_hbase_principal = regionserver_jaas_princ
+    ranger_hbase_keytab = regionserver_keytab_path
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# capture the name of the cluster on which the ranger hbase plugin is enabled
+cluster_name = config['clusterName']
+
+# ranger hbase plugin section end
+
+create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted)
+hbase_home_directory = format("/user/{hbase_user}")
+
+atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
+has_atlas = len(atlas_hosts) > 0
+
+metadata_user = default('/configurations/atlas-env/metadata_user', None)
+atlas_graph_storage_hostname = default('/configurations/application-properties/atlas.graph.storage.hostname', None)
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
+if has_atlas:
+  zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname, hbase_zookeeper_quorum)
+  atlas_with_managed_hbase = len(zk_hosts_matches) > 0
+else:
+  atlas_with_managed_hbase = False
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..ddc9e93
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import status_params
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hbase_user = hadoop_user
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
+
+service_map = {
+  'master' : status_params.hbase_master_win_service_name,
+  'regionserver' : status_params.hbase_regionserver_win_service_name
+}
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py
new file mode 100644
index 0000000..77820cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py
@@ -0,0 +1,92 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script import Script
+from phoenix_service import phoenix_service
+from hbase import hbase
+
+# Note: Phoenix Query Server is only applicable to stack versions that support Phoenix.
+class PhoenixQueryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+
+  def get_component_name(self):
+    return "phoenix-server"
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='queryserver')
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    phoenix_service('start')
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    phoenix_service('stop')
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_version_formatted and check_stack_feature(StackFeature.PHOENIX, params.stack_version_formatted):
+      # phoenix uses hbase configs
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("phoenix-server", params.version)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    phoenix_service('status')
+
+
+  def security_status(self, env):
+    self.put_structured_out({"securityState": "UNSECURED"})
+    
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.phoenix_pid_file]
+
+if __name__ == "__main__":
+  PhoenixQueryServer().execute()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py
new file mode 100644
index 0000000..42d9cd1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import errno
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import check_process_status, format
+
+# Note: Phoenix Query Server is only applicable to stacks that support Phoenix.
+def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
+    # Note: params should already be imported before calling phoenix_service()
+    import status_params
+    pid_file = status_params.phoenix_pid_file
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
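+    # Illustrative expansion of the liveness test above (hypothetical pid path):
+    #   ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && \
+    #   ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1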
+
+    if action == "status":
+      check_process_status(pid_file)
+    else:
+      env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
+      daemon_cmd = format("{phx_daemon_script} {action}")
+      if action == 'start':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env)
+  
+      elif action == 'stop':
+        Execute(daemon_cmd,
+                user=format("{hbase_user}"),
+                environment=env
+        )
+        try:
+          File(pid_file, action = "delete")
+        except OSError as exc:
+          # OSError: [Errno 2] No such file or directory
+          if exc.errno == errno.ENOENT:
+            Logger.info("Did not remove '{0}' as it did not exist".format(pid_file))
+          else:
+            raise
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..5184ea7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.core.source import Template
+import functions
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseServiceCheckWindows(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseServiceCheckDefault(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal} &&") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+    hbase_servicecheck_cleanup_file = format("{exec_tmp_dir}/hbase-smoke-cleanup.sh")
+
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+
+    File(hbase_servicecheck_cleanup_file,
+      content = StaticFile("hbase-smoke-cleanup.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:
+      hbase_grant_permissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grant_privilege_cmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privilege_cmd,
+        user = params.hbase_user,
+        logoutput = True
+      )
+
+    service_check_cmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smoke_verify_cmd = format("{exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+    cleanup_cmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_cleanup_file}")
+    Execute(format("{service_check_cmd} && {smoke_verify_cmd} && {cleanup_cmd}"),
+      tries     = 6,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
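+    # With tries=6 and try_sleep=5, a transiently failing smoke test is retried
+    # up to five more times, roughly 25 seconds of sleep in total, before failing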
+
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py
new file mode 100644
index 0000000..d32dce1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/setup_ranger_hbase.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
+  import params
+
+  if params.enable_ranger_hbase:
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("HBase: Setup ranger: command retry is enabled, so setup will be retried if Ranger admin is down")
+    else:
+      Logger.info("HBase: Setup ranger: command retry is not enabled, so setup will be skipped if Ranger admin is down")
+
+    if params.xml_configurations_supported and params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master':
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseMaster",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hbaseRegional",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         group=params.hbase_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar, params.downloaded_custom_connector,
+                          params.driver_curl_source, params.driver_curl_target, params.java64_home,
+                          params.repo_name, params.hbase_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                          component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                          plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                          component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos if params.security_enabled else None,
+                          component_user_principal=params.ranger_hbase_principal if params.security_enabled else None,
+                          component_user_keytab=params.ranger_hbase_keytab if params.security_enabled else None)
+
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('hbase-client', 'hbase', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.hbase_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
+                        component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
+                        plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
+                        component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+  else:
+    Logger.info('Ranger HBase plugin is not enabled')
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..bc75c78
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HBASE_CLIENT' : 'hbase-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  hbase_master_win_service_name = "master"
+  hbase_regionserver_win_service_name = "regionserver"
+else:
+  pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+  hbase_user = config['configurations']['hbase-env']['hbase_user']
+
+  hbase_master_pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+  regionserver_pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+  phoenix_pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")
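+  # e.g. with a hypothetical hbase_pid_dir of "/var/run/hbase" and hbase_user "hbase",
+  # these resolve to /var/run/hbase/hbase-hbase-master.pid, ...-regionserver.pid
+  # and /var/run/hbase/phoenix-hbase-server.pid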
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+  
+  stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+  stack_version_formatted = format_stack_version(stack_version_unformatted)
+  stack_root = Script.get_stack_root()
+
+  hbase_conf_dir = "/etc/hbase/conf"
+  limits_conf_dir = "/etc/security/limits.d"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    hbase_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+    
+stack_name = default("/hostLevelParams/stack_name", None)
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..703fe26
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+import socket
+
+from resource_management.core import shell
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import check_process_status
+
+
+def prestart(env, stack_component):
+  import params
+
+  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+    conf_select.select(params.stack_name, "hbase", params.version)
+    stack_select.select(stack_component, params.version)
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  is_regionserver_registered(exec_cmd, params.hbase_user, params.hostname, re.IGNORECASE)
+
+
+def is_region_server_process_running():
+  try:
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+    return True
+  except ComponentIsNotRunning:
+    return False
+
+
+@retry(times=30, sleep_time=30, err_class=Fail)
+def is_regionserver_registered(cmd, user, hostname, regex_search_flags):
+  """
+  Queries HBase through the HBase shell to see which servers have successfully registered. This is
+  useful in cases, such as upgrades, where we must ensure that a RegionServer has not only started,
+  but also completed it's registration handshake before moving into upgrading the next RegionServer.
+
+  The hbase shell is used along with the "show 'simple'" command in order to determine if the
+  specified host has registered.
+  :param cmd:
+  :param user:
+  :param hostname:
+  :param regex_search_flags:
+  :return:
+  """
+  if not is_region_server_process_running():
+    Logger.info("RegionServer process is not running")
+    raise Fail("RegionServer process is not running")
+
+  # use hbase shell with "status 'simple'" command
+  code, out = shell.call(cmd, user=user)
+
+  # if we don't have output, then we can't check
+  if not out:
+    raise Fail("Unable to retrieve status information from the HBase shell")
+
+  # try matching the hostname with a colon (which indicates a bound port)
+  bound_hostname_to_match = hostname + ":"
+  match = re.search(bound_hostname_to_match, out, regex_search_flags)
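+  # e.g. a hypothetical status line "c6402.ambari.apache.org:16020 1510426858000, ..."
+  # matches when hostname is "c6402.ambari.apache.org"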
+
+  # if there's no match, try again with the IP address
+  if not match:
+    try:
+      ip_address = socket.gethostbyname(hostname)
+      bound_ip_address_to_match = ip_address + ":"
+      match = re.search(bound_ip_address_to_match, out, regex_search_flags)
+    except socket.error:
+      # this is merely a backup, so just log that it failed
+      Logger.warning("Unable to lookup the IP address of {0}, reverse DNS lookup may not be working.".format(hostname))
+
+  # failed with both a hostname and an IP address, so raise the Fail and let the function auto retry
+  if not match:
+    raise Fail(
+      "The RegionServer named {0} has not yet registered with the HBase Master".format(hostname))
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2
new file mode 100644
index 0000000..3580db0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{{hbase_user}}   - nofile   {{hbase_user_nofile_limit}}
+{{hbase_user}}   - nproc    {{hbase_user_nproc_limit}}
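+
+# Rendered sketch (hypothetical user, with the default limits from hbase-env):
+#   hbase   - nofile   32000
+#   hbase   - nproc    16000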
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..3378983
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2
new file mode 100644
index 0000000..c5a6c3f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_queryserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{queryserver_keytab_path}}"
+principal="{{queryserver_jaas_princ}}";
+};
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};
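Editor's note: these three JAAS templates are identical apart from the keytab and principal variables they reference. As a rough illustration of how such a template becomes a usable config (not part of this patch), the sketch below renders the region server variant with the stand-alone jinja2 package; the keytab path and principal are hypothetical, and Ambari itself resolves these variables through its resource_management Template machinery rather than this way.

```python
# Minimal sketch, assuming the stand-alone jinja2 package is available;
# Ambari resolves these variables via resource_management's Template instead.
from jinja2 import Template

JAAS_TEMPLATE = '''Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="{{regionserver_keytab_path}}"
principal="{{regionserver_jaas_princ}}";
};
'''

print(Template(JAAS_TEMPLATE).render(
    regionserver_keytab_path="/etc/security/keytabs/hbase.service.keytab",  # hypothetical
    regionserver_jaas_princ="hbase/host1.example.com@EXAMPLE.COM"))         # hypothetical
```

In a kerberized cluster the rendered file is then pointed at the JVM, typically via a -Djava.security.auth.login.config system property.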
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2
new file mode 100644
index 0000000..94fbc64
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/input.config-hbase.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hbase_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
+    },
+    {
+      "type":"hbase_regionserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
+    },
+    {
+      "type":"hbase_phoenix_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_master",
+            "hbase_regionserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_phoenix_server"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
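Editor's note: the message_pattern above is a grok expression; Log Search expands macros such as %{TIMESTAMP_ISO8601} and %{JAVACLASS} into regular expressions before matching. The sketch below approximates that expansion with plain Python re (the regexes are rough stand-ins, not Log Search's exact definitions) and parses one sample master log line.

```python
import re

# Rough regex equivalents of the grok macros in message_pattern above;
# the exact expansions shipped with Log Search may differ.
PATTERN = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>TRACE|DEBUG|INFO|WARN|ERROR|FATAL)\s+"
    r"\[(?P<thread_name>[^\]]*)\]\s+"
    r"(?P<logger_name>[\w.$]+):\s+"
    r"(?P<log_message>.*)$", re.DOTALL)

sample = "2017-05-04 12:00:01,123 INFO  [main] master.HMaster: Master started"
match = PATTERN.match(sample)
if match:
    print(match.groupdict())
    # {'logtime': '2017-05-04 12:00:01,123', 'level': 'INFO', ...}
```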
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2 b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2
new file mode 100644
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..5568122
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,97 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"http"
+    },
+
+    "links": [
+      {
+        "name": "hbase_master_ui",
+        "label": "HBase Master UI",
+        "url":"%@://%@:%@/master-status",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "hbase_logs",
+        "label": "HBase Logs",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "zookeeper_info",
+        "label": "Zookeeper Info",
+        "url":"%@://%@:%@/zk.jsp",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "hbase_master_jmx",
+        "label": "HBase Master JMX",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "debug_dump",
+        "label": "Debug Dump",
+        "url":"%@://%@:%@/dump",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      },
+      {
+        "name": "thread_stacks",
+        "label": "Thread Stacks",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "hbase.master.info.port",
+          "http_default_port": "60010",
+          "https_property": "hbase.master.info.port",
+          "https_default_port": "60443",
+          "regex": "",
+          "site": "hbase-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
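Editor's note: each url above contains three %@ placeholders that the Ambari web UI substitutes, in order, with the resolved protocol, host, and port (choosing http_property or https_property from hbase-site depending on the protocol; the regex field, unused here, post-processes the property value). A minimal sketch of the substitution step, with a hypothetical host:

```python
def resolve_quicklink(url_template, protocol, host, port):
    """Fill the %@ placeholders in order: protocol, host, port."""
    for value in (protocol, host, str(port)):
        url_template = url_template.replace("%@", value, 1)
    return url_template

# Hypothetical host; the default HTTP port comes from the JSON above.
print(resolve_quicklink("%@://%@:%@/master-status",
                        "http", "c6401.ambari.apache.org", 60010))
# -> http://c6401.ambari.apache.org:60010/master-status
```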
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
new file mode 100644
index 0000000..44d0c61
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -0,0 +1,10 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for HBase",
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/service_advisor.py
new file mode 100644
index 0000000..f9d1a59
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/service_advisor.py
@@ -0,0 +1,675 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+import math
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class HBASEServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(HBASEServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    self.mastersWithMultipleInstances.add("HBASE_MASTER")
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    self.cardinalitiesDict["HBASE_MASTER"] = {"min": 1}
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    self.heap_size_properties = {"REGIONSERVER":
+                                   [{"config-name": "hbase-env",
+                                     "property": "hbase_regionserver_heapsize",
+                                     "default": "1024m"}],
+                                 "HBASE_MASTER":
+                                   [{"config-name": "hbase-env",
+                                     "property": "hbase_master_heapsize",
+                                     "default": "1024m"}]}
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps the number of hosts to the host index
+    where the component should exist.
+    Must be overridden in child class.
+    """
+    self.componentLayoutSchemes.update({
+      'HBASE_MASTER': {6: 0, 31: 2, "else": 3}
+    })
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = HBASERecommender()
+    recommender.recommendHbaseConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+    recommender.recommendHBASEConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+    recommender.recommendHBASEConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendHBASEConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+    recommender.recommendHBASEConfigurationsFromHDP26(configurations, clusterData, services, hosts)
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = HBASEValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class HBASERecommender(service_advisor.ServiceAdvisor):
+  """
+  HBASE Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(HBASERecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendHbaseConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+    # recommendations for HBase env config
+
+    # HBase master heap by cluster size:
+    #   < 20 hosts (small test clusters): 1 GB; < 100 hosts: 2 GB;
+    #   < 500 hosts: 4 GB; otherwise 8 GB
+    hostsCount = 0
+    if hosts and "items" in hosts:
+      hostsCount = len(hosts["items"])
+
+    # Exactly one of the range conditions below is True, so indexing
+    # the dict with [True] selects the matching heap size (in GB).
+    hbaseMasterRam = {
+      hostsCount < 20: 1,
+      20 <= hostsCount < 100: 2,
+      100 <= hostsCount < 500: 4,
+      500 <= hostsCount: 8
+    }[True]
+
+    putHbaseProperty = self.putProperty(configurations, "hbase-env", services)
+    putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)
+    putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)
+
+    # recommendations for HBase site config
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+
+    if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \
+      and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \
+      and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
+      putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
+
+
+  def recommendHBASEConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    putHbaseEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hbase-env")
+
+    hmaster_host = self.getHostWithComponent("HBASE", "HBASE_MASTER", services, hosts)
+    if hmaster_host is not None:
+      host_ram = hmaster_host["Hosts"]["total_mem"]
+      putHbaseEnvPropertyAttributes('hbase_master_heapsize', 'maximum', max(1024, int(host_ram/1024)))
+
+    rs_hosts = self.getHostsWithComponent("HBASE", "HBASE_REGIONSERVER", services, hosts)
+    if rs_hosts is not None and len(rs_hosts) > 0:
+      min_ram = rs_hosts[0]["Hosts"]["total_mem"]
+      for host in rs_hosts:
+        host_ram = host["Hosts"]["total_mem"]
+        min_ram = min(min_ram, host_ram)
+
+      putHbaseEnvPropertyAttributes('hbase_regionserver_heapsize', 'maximum', max(1024, int(min_ram*0.8/1024)))
+
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    putHbaseSitePropertyAttributes = self.putPropertyAttribute(configurations, "hbase-site")
+    putHbaseSiteProperty("hbase.regionserver.global.memstore.size", '0.4')
+
+    if 'hbase-env' in services['configurations'] and 'phoenix_sql_enabled' in services['configurations']['hbase-env']['properties'] and \
+                    'true' == services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'].lower():
+      putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec')
+      putHbaseSiteProperty("phoenix.functions.allowUserDefinedFunctions", 'true')
+    else:
+      putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec')
+      if ('hbase.rpc.controllerfactory.class' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.rpc.controllerfactory.class' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.rpc.controllerfactory.class', 'delete', 'true')
+      if ('phoenix.functions.allowUserDefinedFunctions' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'phoenix.functions.allowUserDefinedFunctions' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('phoenix.functions.allowUserDefinedFunctions', 'delete', 'true')
+
+    if "ranger-env" in services["configurations"] and "ranger-hbase-plugin-properties" in services["configurations"] and \
+                    "ranger-hbase-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putHbaseRangerPluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
+      rangerEnvHbasePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hbase-plugin-enabled"]
+      putHbaseRangerPluginProperty("ranger-hbase-plugin-enabled", rangerEnvHbasePluginProperty)
+      if "cluster-env" in services["configurations"] and "smokeuser" in services["configurations"]["cluster-env"]["properties"]:
+        smoke_user = services["configurations"]["cluster-env"]["properties"]["smokeuser"]
+        putHbaseRangerPluginProperty("policy_user", smoke_user)
+    rangerPluginEnabled = ''
+    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in  configurations['ranger-hbase-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+
+    if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+      putHbaseSiteProperty('hbase.security.authorization','true')
+
+    # Recommend configs for bucket cache
+    threshold = 23 # 2 GB is reserved for other offheap memory
+    mb = 1024
+    if (int(clusterData["hbaseRam"]) > threshold):
+      # To enable cache - calculate values
+      regionserver_total_ram = int(clusterData["hbaseRam"]) * mb
+      regionserver_heap_size = 20480
+      regionserver_max_direct_memory_size = regionserver_total_ram - regionserver_heap_size
+      hfile_block_cache_size = '0.4'
+      block_cache_heap = 8192 # int(regionserver_heap_size * hfile_block_cache_size)
+      hbase_regionserver_global_memstore_size = '0.4'
+      reserved_offheap_memory = 2048
+      bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory
+      # Combined cache = on-heap block cache + off-heap bucket cache; if the
+      # heap portion were excluded, the percentage below would always be 1.0.
+      hbase_bucketcache_size = block_cache_heap + bucketcache_offheap_memory
+      hbase_bucketcache_percentage_in_combinedcache = float(bucketcache_offheap_memory) / hbase_bucketcache_size
+      hbase_bucketcache_percentage_in_combinedcache_str = "{0:.4f}".format(math.ceil(hbase_bucketcache_percentage_in_combinedcache * 10000) / 10000.0)
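+      # Worked example (hypothetical numbers): with hbaseRam = 32 GB,
+      #   regionserver_total_ram              = 32 * 1024 = 32768 MB
+      #   regionserver_max_direct_memory_size = 32768 - 20480 = 12288 MB
+      #   bucketcache_offheap_memory          = 12288 - 2048  = 10240 MB
+      #   hbase_bucketcache_size              = 8192 + 10240  = 18432 MB
+      #   percentage_in_combinedcache         = 10240 / 18432 ~ 0.5556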
+
+      # Set values in hbase-site
+      putHbaseSiteProperty('hfile.block.cache.size', hfile_block_cache_size)
+      putHbaseSiteProperty('hbase.regionserver.global.memstore.size', hbase_regionserver_global_memstore_size)
+      putHbaseSiteProperty('hbase.bucketcache.ioengine', 'offheap')
+      putHbaseSiteProperty('hbase.bucketcache.size', hbase_bucketcache_size)
+      putHbaseSiteProperty('hbase.bucketcache.percentage.in.combinedcache', hbase_bucketcache_percentage_in_combinedcache_str)
+
+      # Enable in hbase-env
+      putHbaseEnvProperty = self.putProperty(configurations, "hbase-env", services)
+      putHbaseEnvProperty('hbase_max_direct_memory_size', regionserver_max_direct_memory_size)
+      putHbaseEnvProperty('hbase_regionserver_heapsize', regionserver_heap_size)
+    else:
+      # Disable
+      if ('hbase.bucketcache.ioengine' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.ioengine' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.ioengine', 'delete', 'true')
+      if ('hbase.bucketcache.size' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.size' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.size', 'delete', 'true')
+      if ('hbase.bucketcache.percentage.in.combinedcache' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.percentage.in.combinedcache' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+      if ('hbase_max_direct_memory_size' in configurations["hbase-env"]["properties"]) or \
+              ('hbase-env' in services['configurations'] and 'hbase_max_direct_memory_size' in services['configurations']["hbase-env"]["properties"]):
+        putHbaseEnvPropertyAttributes('hbase_max_direct_memory_size', 'delete', 'true')
+
+    # Authorization
+    hbaseCoProcessorConfigs = {
+      'hbase.coprocessor.region.classes': [],
+      'hbase.coprocessor.regionserver.classes': [],
+      'hbase.coprocessor.master.classes': []
+    }
+    for key in hbaseCoProcessorConfigs:
+      hbase_coprocessor_classes = None
+      if key in configurations["hbase-site"]["properties"]:
+        hbase_coprocessor_classes = configurations["hbase-site"]["properties"][key].strip()
+      elif 'hbase-site' in services['configurations'] and key in services['configurations']["hbase-site"]["properties"]:
+        hbase_coprocessor_classes = services['configurations']["hbase-site"]["properties"][key].strip()
+      if hbase_coprocessor_classes:
+        hbaseCoProcessorConfigs[key] = hbase_coprocessor_classes.split(',')
+
+    # The freshly calculated 'configurations' dict takes priority; otherwise
+    # fall back to the service's existing configurations.
+    hbase_security_authorization = None
+    if 'hbase-site' in configurations and 'hbase.security.authorization' in configurations['hbase-site']['properties']:
+      hbase_security_authorization = configurations['hbase-site']['properties']['hbase.security.authorization']
+    elif 'hbase-site' in services['configurations'] and 'hbase.security.authorization' in services['configurations']['hbase-site']['properties']:
+      hbase_security_authorization = services['configurations']['hbase-site']['properties']['hbase.security.authorization']
+    if hbase_security_authorization:
+      if 'true' == hbase_security_authorization.lower():
+        hbaseCoProcessorConfigs['hbase.coprocessor.master.classes'].append('org.apache.hadoop.hbase.security.access.AccessController')
+        hbaseCoProcessorConfigs['hbase.coprocessor.regionserver.classes'].append('org.apache.hadoop.hbase.security.access.AccessController')
+        # region coprocessor classes required when HBase authorization is enabled
+        authRegionClasses = ['org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint', 'org.apache.hadoop.hbase.security.access.AccessController']
+        hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].extend(authRegionClasses)
+      else:
+        if 'org.apache.hadoop.hbase.security.access.AccessController' in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].remove('org.apache.hadoop.hbase.security.access.AccessController')
+        if 'org.apache.hadoop.hbase.security.access.AccessController' in hbaseCoProcessorConfigs['hbase.coprocessor.master.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.master.classes'].remove('org.apache.hadoop.hbase.security.access.AccessController')
+
+        hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint")
+        if ('hbase.coprocessor.regionserver.classes' in configurations["hbase-site"]["properties"]) or \
+                ('hbase-site' in services['configurations'] and 'hbase.coprocessor.regionserver.classes' in services['configurations']["hbase-site"]["properties"]):
+          putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+    else:
+      hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint")
+      if ('hbase.coprocessor.regionserver.classes' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.coprocessor.regionserver.classes' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+
+    # Authentication
+    if 'hbase-site' in services['configurations'] and 'hbase.security.authentication' in services['configurations']['hbase-site']['properties']:
+      if 'kerberos' == services['configurations']['hbase-site']['properties']['hbase.security.authentication'].lower():
+        if 'org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint' not in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append('org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint')
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' not in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append('org.apache.hadoop.hbase.security.token.TokenProvider')
+      else:
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].remove('org.apache.hadoop.hbase.security.token.TokenProvider')
+
+    # Remove duplicates and unresolved template placeholders
+    for key in hbaseCoProcessorConfigs:
+      uniqueCoprocessorRegionClassList = []
+      for i in hbaseCoProcessorConfigs[key]:
+        if i not in uniqueCoprocessorRegionClassList and \
+           i.strip() not in ['{{hbase_coprocessor_region_classes}}', '{{hbase_coprocessor_master_classes}}', '{{hbase_coprocessor_regionserver_classes}}']:
+          uniqueCoprocessorRegionClassList.append(i)
+      putHbaseSiteProperty(key, ','.join(set(uniqueCoprocessorRegionClassList)))
+
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    rangerServiceVersion=''
+    if 'RANGER' in servicesList:
+      rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+
+    if rangerServiceVersion and rangerServiceVersion == '0.4.0':
+      rangerClass = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor'
+
+    nonRangerClass = 'org.apache.hadoop.hbase.security.access.AccessController'
+    for config_name in hbaseCoProcessorConfigs:
+      if 'hbase-site' in services['configurations']:
+        if config_name in services['configurations']['hbase-site']['properties']:
+          if 'hbase-site' in configurations and config_name in configurations['hbase-site']['properties']:
+            coprocessorConfig = configurations['hbase-site']['properties'][config_name]
+          else:
+            coprocessorConfig = services['configurations']['hbase-site']['properties'][config_name]
+          coprocessorClasses = coprocessorConfig.split(",")
+          coprocessorClasses = filter(None, coprocessorClasses) # Removes empty string elements from the list
+          if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+            if nonRangerClass in coprocessorClasses:
+              coprocessorClasses.remove(nonRangerClass)
+            if rangerClass not in coprocessorClasses:
+              coprocessorClasses.append(rangerClass)
+            putHbaseSiteProperty(config_name, ','.join(coprocessorClasses))
+          elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'No'.lower():
+            if rangerClass in coprocessorClasses:
+              coprocessorClasses.remove(rangerClass)
+              if nonRangerClass not in coprocessorClasses:
+                coprocessorClasses.append(nonRangerClass)
+              putHbaseSiteProperty(config_name, ','.join(coprocessorClasses))
+        elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+          putHbaseSiteProperty(config_name, rangerClass)
+
+
+  def recommendHBASEConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    putHbaseSitePropertyAttributes = self.putPropertyAttribute(configurations, "hbase-site")
+    putHbaseEnvProperty = self.putProperty(configurations, "hbase-env", services)
+    putHbaseEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hbase-env")
+
+    # bucket cache for 1.x is configured slightly differently, HBASE-11520
+    threshold = 23 # 2 GB is reserved for other offheap memory
+    if (int(clusterData["hbaseRam"]) > threshold):
+      # To enable cache - calculate values
+      regionserver_total_ram = int(clusterData["hbaseRam"]) * 1024
+      regionserver_heap_size = 20480
+      regionserver_max_direct_memory_size = regionserver_total_ram - regionserver_heap_size
+      hfile_block_cache_size = '0.4'
+      block_cache_heap = 8192 # int(regionserver_heap_size * hfile_block_cache_size)
+      hbase_regionserver_global_memstore_size = '0.4'
+      reserved_offheap_memory = 2048
+      bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory
+      hbase_bucketcache_size = bucketcache_offheap_memory
+
+      # Set values in hbase-site
+      putHbaseSiteProperty('hfile.block.cache.size', hfile_block_cache_size)
+      putHbaseSiteProperty('hbase.regionserver.global.memstore.size', hbase_regionserver_global_memstore_size)
+      putHbaseSiteProperty('hbase.bucketcache.ioengine', 'offheap')
+      putHbaseSiteProperty('hbase.bucketcache.size', hbase_bucketcache_size)
+      # 2.2 stack method was called earlier, unset
+      putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+
+      # Enable in hbase-env
+      putHbaseEnvProperty('hbase_max_direct_memory_size', regionserver_max_direct_memory_size)
+      putHbaseEnvProperty('hbase_regionserver_heapsize', regionserver_heap_size)
+    else:
+      # Disable
+      putHbaseSitePropertyAttributes('hbase.bucketcache.ioengine', 'delete', 'true')
+      putHbaseSitePropertyAttributes('hbase.bucketcache.size', 'delete', 'true')
+      putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+
+      putHbaseEnvPropertyAttributes('hbase_max_direct_memory_size', 'delete', 'true')
+
+    if 'hbase-env' in services['configurations'] and 'phoenix_sql_enabled' in services['configurations']['hbase-env']['properties'] and \
+                    'true' == services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'].lower():
+      if 'hbase.rpc.controllerfactory.class' in services['configurations']['hbase-site']['properties'] and \
+                      services['configurations']['hbase-site']['properties']['hbase.rpc.controllerfactory.class'] == \
+                      'org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory':
+        putHbaseSitePropertyAttributes('hbase.rpc.controllerfactory.class', 'delete', 'true')
+
+      putHbaseSiteProperty("hbase.region.server.rpc.scheduler.factory.class", "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory")
+    else:
+      putHbaseSitePropertyAttributes('hbase.region.server.rpc.scheduler.factory.class', 'delete', 'true')
+
+
+  def recommendHBASEConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+
+    if "cluster-env" in services["configurations"] \
+      and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+      and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+      # Set the master's UI to readonly
+      putHbaseSiteProperty('hbase.master.ui.readonly', 'true')
+
+      phoenix_query_server_hosts = self.get_phoenix_query_server_hosts(services, hosts)
+      self.logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts))
+      if phoenix_query_server_hosts:
+        self.logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts))
+        # The PQS hosts we want to ensure are set
+        new_value = ','.join(phoenix_query_server_hosts)
+        # Update the proxyuser setting, deferring to our callback to merge results together
+        self.put_proxyuser_value("HTTP", new_value, services=services, configurations=configurations, put_function=putCoreSiteProperty)
+      else:
+        self.logger.debug("No phoenix query server hosts to update")
+    else:
+      putHbaseSiteProperty('hbase.master.ui.readonly', 'false')
+
+
+  def recommendHBASEConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
+    if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:
+      hbase_user = services['configurations']['hbase-env']['properties']['hbase_user']
+    else:
+      hbase_user = 'hbase'
+
+    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in configurations['ranger-hbase-plugin-properties']['properties']:
+      ranger_hbase_plugin_enabled = (configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'Yes'.lower())
+    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      ranger_hbase_plugin_enabled = (services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'Yes'.lower())
+    else:
+      ranger_hbase_plugin_enabled = False
+
+    if ranger_hbase_plugin_enabled and 'ranger-hbase-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      self.logger.info("Setting Hbase Repo user for Ranger.")
+      putRangerHbasePluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
+      putRangerHbasePluginProperty("REPOSITORY_CONFIG_USERNAME",hbase_user)
+    else:
+      self.logger.info("Not setting Hbase Repo user for Ranger.")
+
+
+class HBASEValidator(service_advisor.ServiceAdvisor):
+  """
+  HBASE Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(HBASEValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("hbase-env", self.validateHbaseEnvConfigurationsFromHDP206),
+                       ("hbase-site", self.validateHBASEConfigurationsFromHDP22),
+                       ("hbase-env", self.validateHBASEEnvConfigurationsFromHDP22),
+                       ("ranger-hbase-plugin-properties", self.validateHBASERangerPluginConfigurationsFromHDP22),
+                       ("hbase-site", self.validateHBASEConfigurationsFromHDP23)]
+
+
+  def validateHbaseEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_site = self.getSiteProperties(configurations, "hbase-site")
+    validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
+                        {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
+                        {"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
+    return self.toConfigurationValidationProblems(validationItems, "hbase-env")
+
+  def is_number(self, s):
+    try:
+      float(s)
+      return True
+    except ValueError:
+      return False
+
+
+  def validateHBASEConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_site = properties
+    validationItems = []
+
+    prop_name1 = 'hbase.regionserver.global.memstore.size'
+    prop_name2 = 'hfile.block.cache.size'
+    props_max_sum = 0.8
+
+    if prop_name1 in hbase_site and not self.is_number(hbase_site[prop_name1]):
+      validationItems.append({"config-name": prop_name1,
+                              "item": self.getWarnItem(
+                                "{0} should be float value".format(prop_name1))})
+    elif prop_name2 in hbase_site and not self.is_number(hbase_site[prop_name2]):
+      validationItems.append({"config-name": prop_name2,
+                              "item": self.getWarnItem(
+                                "{0} should be float value".format(prop_name2))})
+    elif prop_name1 in hbase_site and prop_name2 in hbase_site and \
+                            float(hbase_site[prop_name1]) + float(hbase_site[prop_name2]) > props_max_sum:
+      validationItems.append({"config-name": prop_name1,
+                              "item": self.getWarnItem(
+                                "{0} and {1} sum should not exceed {2}".format(prop_name1, prop_name2, props_max_sum))})
+
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  "{0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      exclude_val = "org.apache.hadoop.hbase.security.access.AccessController"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+
+    # Validate bucket cache correct config
+    prop_name = "hbase.bucketcache.ioengine"
+    prop_val = "offheap"
+    if prop_name in hbase_site and not (not hbase_site[prop_name] or hbase_site[prop_name] == prop_val):
+      validationItems.append({"config-name": prop_name,
+                              "item": self.getWarnItem(
+                                "Recommended values of " \
+                                " {0} is empty or '{1}'".format(prop_name,prop_val))})
+
+    prop_name1 = "hbase.bucketcache.ioengine"
+    prop_name2 = "hbase.bucketcache.size"
+    prop_name3 = "hbase.bucketcache.percentage.in.combinedcache"
+
+    if prop_name1 in hbase_site and prop_name2 in hbase_site and hbase_site[prop_name1] and not hbase_site[prop_name2]:
+      validationItems.append({"config-name": prop_name2,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name2))})
+    if prop_name1 in hbase_site and prop_name3 in hbase_site and hbase_site[prop_name1] and not hbase_site[prop_name3]:
+      validationItems.append({"config-name": prop_name3,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name3))})
+
+    # Validate hbase.security.authentication.
+    # Kerberos works only when security enabled.
+    if "hbase.security.authentication" in properties:
+      hbase_security_kerberos = properties["hbase.security.authentication"].lower() == "kerberos"
+      core_site_properties = self.getSiteProperties(configurations, "core-site")
+      security_enabled = False
+      if core_site_properties:
+        security_enabled = core_site_properties['hadoop.security.authentication'] == 'kerberos' and core_site_properties['hadoop.security.authorization'] == 'true'
+      if not security_enabled and hbase_security_kerberos:
+        validationItems.append({"config-name": "hbase.security.authentication",
+                                "item": self.getWarnItem("Cluster must be secured with Kerberos before hbase.security.authentication's value of kerberos will have effect")})
+
+    return self.toConfigurationValidationProblems(validationItems, "hbase-site")
+
+  def validateHBASEEnvConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_env = properties
+    validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
+                        {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')} ]
+    prop_name = "hbase_max_direct_memory_size"
+    hbase_site_properties = self.getSiteProperties(configurations, "hbase-site")
+    prop_name1 = "hbase.bucketcache.ioengine"
+
+    if prop_name1 in hbase_site_properties and prop_name in hbase_env and hbase_site_properties[prop_name1] and hbase_site_properties[prop_name1] == "offheap" and not hbase_env[prop_name]:
+      validationItems.append({"config-name": prop_name,
+                              "item": self.getWarnItem(
+                                "If bucketcache ioengine is enabled, {0} should be set".format(prop_name))})
+
+    return self.toConfigurationValidationProblems(validationItems, "hbase-env")
+
+  def validateHBASERangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # ranger-hbase-plugin must be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-hbase-plugin-enabled' in ranger_env or \
+                      ranger_env['ranger-hbase-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-hbase-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled must correspond ranger-env/ranger-hbase-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-hbase-plugin-properties")
+
+  def validateHBASEConfigurationsFromHDP23(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_site = properties
+    validationItems = []
+
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  "{0} needs to be set to {1}".format(prop_name,prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
+      exclude_val = "org.apache.hadoop.hbase.security.access.AccessController"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
+      if (prop_val in hbase_site[prop_name] and exclude_val not in hbase_site[prop_name]):
+        pass
+      else:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger HBase Plugin is enabled." \
+                                  " {0} needs to contain {1} instead of {2}".format(prop_name,prop_val,exclude_val))})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "hbase-site")
+    return validationProblems
\ No newline at end of file
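Editor's note: the master heap rule in recommendHbaseConfigurationsFromHDP206 above reduces to a step function of cluster size (the region server heap comes from clusterData['hbaseRam'] instead). A standalone restatement, with the thresholds copied from the advisor:

```python
def recommended_master_heap_gb(hosts_count):
    """Master heap steps copied from recommendHbaseConfigurationsFromHDP206."""
    if hosts_count < 20:
        return 1   # small test clusters
    elif hosts_count < 100:
        return 2
    elif hosts_count < 500:
        return 4
    return 8

for n in (5, 50, 250, 1000):
    print(n, recommended_master_heap_gb(n) * 1024)  # the advisor stores MB
# 5 1024 / 50 2048 / 250 4096 / 1000 8192
```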
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json
new file mode 100644
index 0000000..6ed89a1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/themes/theme.json
@@ -0,0 +1,407 @@
+{
+  "name": "default",
+  "description": "Default theme for HBASE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "3",
+              "sections": [
+                {
+                  "name": "section-hbase-memory",
+                  "display-name": "Server",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-memory-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-memory-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-client",
+                  "display-name": "Client",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-client-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-disk",
+                  "display-name": "Disk",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-disk-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-timeouts",
+                  "display-name": "Timeouts",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-timeouts-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-security",
+                  "display-name": "Security",
+                  "row-index": "2",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-security-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-phoenix",
+                  "display-name": "Phoenix SQL",
+                  "row-index": "2",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-phoenix-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hbase-env/hbase_master_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-env/hbase_regionserver_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hfile.block.cache.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.flush.size",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.handler.count",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.client.retries.number",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.client.keyvalue.maxsize",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.max.filesize",
+          "subsection-name": "subsection-hbase-disk-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.majorcompaction",
+          "subsection-name": "subsection-hbase-disk-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hstore.compaction.max",
+          "subsection-name": "subsection-hbase-disk-col3"
+        },
+        {
+          "config": "hbase-site/zookeeper.session.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.rpc.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authentication",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authorization",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-env/phoenix_sql_enabled",
+          "subsection-name": "subsection-hbase-phoenix-col1"
+        },
+        {
+          "config": "hbase-site/phoenix.query.timeoutMs",
+          "subsection-name": "subsection-hbase-phoenix-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hbase-site/hbase.regionserver.global.memstore.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_master_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_regionserver_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hfile.block.cache.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.flush.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.retries.number",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.keyvalue.maxsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.max.filesize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.majorcompaction",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "days,hours"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hstore.compaction.max",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/zookeeper.session.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.rpc.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authentication",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authorization",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-env/phoenix_sql_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/phoenix.query.timeoutMs",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+
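
Note on the theme file above: the "placement" block maps each config key (e.g. hbase-site/hbase.rpc.timeout) into a named subsection of the layout, and the "widgets" block assigns each one a control type (slider, combo, toggle, time-interval-spinner). A minimal sketch of a consistency check over such a theme file; the "configuration" wrapper key and the file path are assumptions about parts of the file not shown in this hunk:

    import json

    # Load the enhanced-config theme; "configuration" as the wrapper key is
    # an assumption about the top of the file, which this hunk does not show.
    with open("theme.json") as f:
        theme = json.load(f)["configuration"]

    placed = set(entry["config"] for entry in theme["placement"]["configs"])
    widgeted = set(entry["config"] for entry in theme["widgets"])

    # Every config placed into a subsection should also declare a widget,
    # otherwise Ambari Web has no control to render for it.
    missing = placed - widgeted
    if missing:
        print("placed but no widget: %s" % sorted(missing))
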
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json
new file mode 100644
index 0000000..ae47833
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/widgets.json
@@ -0,0 +1,510 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hbase_dashboard",
+      "display_name": "Standard HBase Dashboard",
+      "section_name": "HBASE_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Reads and Writes",
+          "description": "Rate (per second) of read and write requests on all regions in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Mutate_num_ops._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_num_ops._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read Requests",
+              "value": "${regionserver.Server.Get_num_ops._rate + regionserver.Server.ScanNext_num_ops._rate}"
+            },
+            {
+              "name": "Write Requests",
+              "value": "${regionserver.Server.Append_num_ops._rate + regionserver.Server.Delete_num_ops._rate + regionserver.Server.Increment_num_ops._rate + regionserver.Server.Mutate_num_ops._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Read Latency",
+          "description": "maximum of 95% read latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Get Latency",
+              "value": "${regionserver.Server.Get_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% ScanNext Latency",
+              "value": "${regionserver.Server.ScanNext_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Write Latency",
+          "description": "maximum of 95% write latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Mutate_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Mutate Latency",
+              "value": "${regionserver.Server.Mutate_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Increment Latency",
+              "value": "${regionserver.Server.Increment_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Append Latency",
+              "value": "${regionserver.Server.Append_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Delete Latency",
+              "value": "${regionserver.Server.Delete_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Open Connections",
+          "description": "Count of open connections across all RegionServer. This is indicative of RegionServer load in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numOpenConnections._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numOpenConnections._sum",
+              "category": "",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${regionserver.RegionServer.numOpenConnections._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Request Handlers",
+          "description": "Count of Active handlers vs count of calls waiting in the general queue.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numActiveHandler._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numActiveHandler._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.RegionServer.numCallsInGeneralQueue._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numCallsInGeneralQueue._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Active Handlers",
+              "value": "${regionserver.RegionServer.numActiveHandler._sum}"
+            },
+            {
+              "name": "Calls in General Queue",
+              "value": "${regionserver.RegionServer.numCallsInGeneralQueue._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Files Local",
+          "description": "Average percentage of local files to RegionServer in the cluster.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.percentFilesLocal",
+              "metric_path": "metrics/hbase/regionserver/Server/percentFilesLocal",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Files Local",
+              "value": "${regionserver.Server.percentFilesLocal}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "error_threshold":"25",
+            "warning_threshold": "75"
+          }
+        },
+        {
+          "widget_name": "Blocked Updates",
+          "description": "Number of milliseconds updates have been blocked so the memstore can be flushed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.updatesBlockedTime._rate",
+              "metric_path": "metrics/hbase/regionserver/Server/updatesBlockedTime._rate",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Updates Blocked Time",
+              "value": "${regionserver.Server.updatesBlockedTime._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network IO utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hbase_heatmap",
+      "display_name": "HBase Heatmaps",
+      "section_name": "HBASE_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HBase Compaction Queue Size",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "metric_path": "metrics/hbase/regionserver/compactionQueueSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Compaction Queue Size",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength} "
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "10"
+          }
+        },
+        {
+          "widget_name": "HBase Memstore Sizes",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+              "metric_path": "metrics/hbase/regionserver/memstoreSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Memstore Sizes",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize}"
+            }
+          ],
+          "properties": {
+            "display_unit": "B",
+            "max_limit": "104857600"
+          }
+        },
+        {
+          "widget_name": "HBase Read Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+              "metric_path": "metrics/hbase/regionserver/readRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Read Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Write Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+              "metric_path": "metrics/hbase/regionserver/writeRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Write Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Regions",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+              "metric_path": "metrics/hbase/regionserver/regions",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Regions",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "10"
+          }
+        }
+      ]
+    }
+  ]
+}
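
Each widget's "values" entries above are arithmetic expressions over the declared metric names; the "Cluster CPU" widget, for instance, computes busy time over total time. A worked example of that formula on invented samples:

    # Hypothetical instantaneous samples for the five CPU metrics that the
    # "Cluster CPU" widget sums across RegionServer hosts.
    cpu = {"system": 12.0, "user": 30.0, "nice": 1.0, "idle": 50.0, "wio": 7.0}

    busy = cpu["system"] + cpu["user"] + cpu["nice"]   # 43.0
    total = busy + cpu["idle"] + cpu["wio"]            # 100.0
    utilization = busy / total * 100                   # 43.0 percent
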
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-logsearch-conf.xml
deleted file mode 100644
index d85a028..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-logsearch-conf.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>HDFS</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hdfs_datanode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
-    },
-    {
-      "type":"hdfs_namenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
-    },
-    {
-      "type":"hdfs_journalnode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
-    },
-    {
-      "type":"hdfs_secondarynamenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
-    },
-    {
-      "type":"hdfs_zkfc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
-    },
-    {
-      "type":"hdfs_nfs3",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
-    },
-    {
-      "type":"hdfs_audit",
-      "rowtype":"audit",
-      "is_enabled":"true",
-      "add_fields":{
-        "logType":"HDFSAudit",
-        "enforcer":"hadoop-acl",
-        "repoType":"1",
-        "repo":"hdfs"
-      },
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_datanode",
-            "hdfs_journalnode",
-            "hdfs_secondarynamenode",
-            "hdfs_namenode",
-            "hdfs_zkfc",
-            "hdfs_nfs3"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "evtTime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"keyvalue",
-      "sort_order":1,
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "source_field":"log_message",
-      "value_split":"=",
-      "field_split":"\t",
-      "post_map_values":{
-        "src":{
-          "map_fieldname":{
-            "new_fieldname":"resource"
-          }
-         },
-        "ip":{
-          "map_fieldname":{
-            "new_fieldname":"cliIP"
-          }
-         },
-        "allowed":[
-          {
-            "map_fieldvalue":{
-              "pre_value":"true",
-              "post_value":"1"
-            }
-           },
-          {
-            "map_fieldvalue":{
-              "pre_value":"false",
-              "post_value":"0"
-            }
-           },
-          {
-            "map_fieldname":{
-              "new_fieldname":"result"
-            }
-           }
-         ],
-        "cmd":{
-          "map_fieldname":{
-            "new_fieldname":"action"
-          }
-         },
-        "proto":{
-          "map_fieldname":{
-            "new_fieldname":"cliType"
-          }
-         },
-        "callerContext":{
-          "map_fieldname":{
-            "new_fieldname":"req_caller_id"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "sort_order":2,
-      "source_field":"ugi",
-      "remove_source_field":"false",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
-      "post_map_values":{
-        "user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "x_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "p_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "k_user":{
-          "map_fieldname":{
-            "new_fieldname":"proxyUsers"
-          }
-         },
-        "p_authType":{
-          "map_fieldname":{
-            "new_fieldname":"authType"
-          }
-         },
-        "k_authType":{
-          "map_fieldname":{
-            "new_fieldname":"proxyAuthType"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
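
The grok filters in the file removed above parse log4j lines of the form %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n into structured fields. A rough Python re equivalent of that message_pattern; the sample line is invented for illustration:

    import re

    line = ("2017-03-01 12:00:00,123 INFO  datanode.DataNode "
            "(DataNode.java:run(123)) - shutting down")
    pattern = (r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
               r"(?P<level>[A-Z]+)\s+(?P<logger_name>[\w.$]+)\s+"
               r"\((?P<file>[\w.]+):(?P<method>\w+)\((?P<line_number>\d+)\)\)"
               r"\s+-\s+(?P<log_message>.*)")
    fields = re.match(pattern, line).groupdict()
    print(fields["level"])   # -> INFO
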
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index 3026938..0769ae8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -35,7 +35,7 @@
           <dependencies>
             <dependency>
               <name>HDFS/ZKFC</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
@@ -48,7 +48,7 @@
             </dependency>
             <dependency>
               <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
@@ -61,7 +61,7 @@
             </dependency>
             <dependency>
               <name>HDFS/JOURNALNODE</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index bc2102a..7f64c80 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -34,7 +34,7 @@
 from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
 from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from ambari_commons.ambari_metrics_helper import select_metric_collector_for_sink
-
+from ambari_agent.AmbariConfig import AmbariConfig
 
 RESULT_STATE_OK = 'OK'
 RESULT_STATE_CRITICAL = 'CRITICAL'
@@ -320,7 +320,13 @@
   metric_collector_https_enabled = str(configurations[AMS_HTTP_POLICY]) == "HTTPS_ONLY"
 
   try:
-    conn = network.get_http_connection(collector_host, int(collector_port), metric_collector_https_enabled, ca_certs)
+    conn = network.get_http_connection(
+      collector_host,
+      int(collector_port),
+      metric_collector_https_enabled,
+      ca_certs,
+      ssl_version=AmbariConfig.get_resolved_config().get_force_https_protocol_value()
+    )
     conn.request("GET", AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
     response = conn.getresponse()
     data = response.read()
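
The hunk above threads a forced SSL protocol version, resolved from the agent configuration, into the metrics-collector connection. Roughly what pinning a protocol version amounts to with the standard library; the host, port, and TLS version here are illustrative, and the real code goes through Ambari's network.get_http_connection() helper rather than building the connection directly:

    import ssl
    try:
        from http.client import HTTPSConnection   # Python 3
    except ImportError:
        from httplib import HTTPSConnection       # Python 2

    # Force a specific protocol instead of negotiating the library default.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    conn = HTTPSConnection("collector.example.com", 6188, context=ctx)
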
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 924eea4..da03cce 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -21,10 +21,13 @@
 from ambari_commons.constants import UPGRADE_TYPE_ROLLING
 
 from hdfs_datanode import datanode
+from resource_management import Script, Fail, shell, Logger
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.decorator import retry
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from resource_management.core.logger import Logger
@@ -32,6 +35,7 @@
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
 from utils import get_hdfs_binary
+from utils import get_dfsadmin_base_command
 
 class DataNode(Script):
 
@@ -75,12 +79,51 @@
         datanode(action="stop")
     else:
       datanode(action="stop")
+    # verify that the datanode is down
+    self.check_datanode_shutdown(hdfs_binary)
 
   def status(self, env):
     import status_params
     env.set_params(status_params)
     datanode(action = "status")
 
+  @retry(times=24, sleep_time=5, err_class=Fail)
+  def check_datanode_shutdown(self, hdfs_binary):
+    """
+    Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
+    several times, pausing in between runs. Once the DataNode stops responding
+    this method will return, otherwise it will raise a Fail(...) and retry
+    automatically.
+    The stack defaults for retrying for HDFS are also way too slow for this
+    command; they are set to wait about 45 seconds between client retries. As
+    a result, a single execution of dfsadmin will take 45 seconds to retry and
+    the DataNode may be marked as dead, causing problems with HBase.
+    https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
+    times for ipc.client.connect.retry.interval. In the meantime, override them
+    here, but only for RU.
+    :param hdfs_binary: name/path of the HDFS binary to use
+    :return:
+    """
+    import params
+
+    # override stock retry timeouts since after 30 seconds, the datanode is
+    # marked as dead and can affect HBase during RU
+    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
+    command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
+
+    is_datanode_deregistered = False
+    try:
+      shell.checked_call(command, user=params.hdfs_user, tries=1)
+    except:
+      is_datanode_deregistered = True
+
+    if not is_datanode_deregistered:
+      Logger.info("DataNode has not yet deregistered from the NameNode...")
+      raise Fail('DataNode has not yet deregistered from the NameNode...')
+
+    Logger.info("DataNode has successfully shutdown.")
+    return True
+
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class DataNodeDefault(DataNode):
@@ -100,64 +143,6 @@
     hdfs_binary = self.get_hdfs_binary()
     # ensure the DataNode has started and rejoined the cluster
     datanode_upgrade.post_upgrade_check(hdfs_binary)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
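
The check_datanode_shutdown method added above relies on the @retry decorator (times=24 with sleep_time=5 gives roughly two minutes of polling). A minimal sketch of those retry semantics, not Ambari's actual implementation in resource_management.libraries.functions.decorator:

    import time
    from functools import wraps

    def retry(times, sleep_time, err_class):
        def decorator(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise   # budget exhausted, propagate the failure
                        time.sleep(sleep_time)
            return wrapper
        return decorator
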
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
index b55237d..c1b0296 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
@@ -48,10 +48,7 @@
   command = format('{dfsadmin_base_command} -shutdownDatanode {dfs_dn_ipc_address} upgrade')
 
   code, output = shell.call(command, user=params.hdfs_user)
-  if code == 0:
-    # verify that the datanode is down
-    _check_datanode_shutdown(hdfs_binary)
-  else:
+  if code != 0:
     # Due to bug HDFS-7533, DataNode may not always shutdown during stack upgrade, and it is necessary to kill it.
     if output is not None and re.search("Shutdown already in progress", output):
       Logger.error("Due to a known issue in DataNode, the command {0} did not work, so will need to shutdown the datanode forcefully.".format(command))
@@ -84,39 +81,6 @@
   except ComponentIsNotRunning:
     return False
 
-@retry(times=24, sleep_time=5, err_class=Fail)
-def _check_datanode_shutdown(hdfs_binary):
-  """
-  Checks that a DataNode is down by running "hdfs dfsamin getDatanodeInfo"
-  several times, pausing in between runs. Once the DataNode stops responding
-  this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  The stack defaults for retrying for HDFS are also way too slow for this
-  command; they are set to wait about 45 seconds between client retries. As
-  a result, a single execution of dfsadmin will take 45 seconds to retry and
-  the DataNode may be marked as dead, causing problems with HBase.
-  https://issues.apache.org/jira/browse/HDFS-8510 tracks reducing the
-  times for ipc.client.connect.retry.interval. In the meantime, override them
-  here, but only for RU.
-  :param hdfs_binary: name/path of the HDFS binary to use
-  :return:
-  """
-  import params
-
-  # override stock retry timeouts since after 30 seconds, the datanode is
-  # marked as dead and can affect HBase during RU
-  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
-  command = format('{dfsadmin_base_command} -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo {dfs_dn_ipc_address}')
-
-  try:
-    Execute(command, user=params.hdfs_user, tries=1)
-  except:
-    Logger.info("DataNode has successfully shutdown for upgrade.")
-    return
-
-  Logger.info("DataNode has not shutdown.")
-  raise Fail('DataNode has not shutdown.')
-
 
 @retry(times=30, sleep_time=30, err_class=Fail) # keep trying for 15 mins
 def _check_datanode_startup(hdfs_binary):
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 4dabdbc..51acc9e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -66,51 +66,6 @@
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations ={}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues: # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                       status_params.hdfs_user,
-                       status_params.hdfs_user_keytab,
-                       status_params.hdfs_user_principal,
-                       status_params.hostname,
-                       status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class HdfsClientWindows(HdfsClient):
   def install(self, env):
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 0489792..aa34dc0 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -533,7 +533,16 @@
   return marked
 
 
-@retry(times=125, sleep_time=5, backoff_factor=2, err_class=Fail)
+def find_timeout():
+  import params
+
+  if isinstance(params.command_timeout, (int, long)):
+    return params.command_timeout
+
+  return int(params.command_timeout)
+
+
+@retry(sleep_time=5, backoff_factor=2, err_class=Fail, timeout_func=find_timeout)
 def is_this_namenode_active():
   """
   Gets whether the current NameNode is Active. This function will wait until the NameNode is
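
This hunk replaces a fixed retry count (times=125) with a total time budget taken from params.command_timeout via timeout_func, keeping the 5-second initial sleep and the exponential backoff. A sketch of what time-budgeted retries with backoff look like; the function name and the 600-second default are illustrative:

    import time

    def wait_until_active(check, timeout=600, sleep_time=5, backoff_factor=2):
        deadline = time.time() + timeout
        delay = sleep_time
        while True:
            if check():
                return True
            if time.time() + delay > deadline:
                raise RuntimeError("did not become active within the budget")
            time.sleep(delay)
            delay *= backoff_factor
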
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 9448fa6..7fd8d70 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -105,63 +105,6 @@
     env.set_params(status_params)
     check_process_status(status_params.journalnode_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(hdfs_site_expectations)
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 1347f37..65cd378 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -216,63 +216,6 @@
             try_sleep=10
     )
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                         'dfs.namenode.keytab.file',
-                         'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hdfs-site' not in security_params
-               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
-               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def rebalancehdfs(self, env):
     import params
     env.set_params(params)
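For context, every security_status method deleted in this patch followed the same validation contract from the security_commons helpers. A minimal sketch of that contract, using only the signatures visible in the removed code (it runs only where the Ambari agent's resource_management libraries are installed; the conf dir path is illustrative):

```python
from resource_management.libraries.functions.security_commons import (
    build_expectations,
    get_params_from_filesystem,
    validate_security_config_properties,
    FILE_TYPE_XML,
)

# Expect kerberos authentication to be enabled and auth_to_local to be non-empty.
expectations = build_expectations(
    'core-site',
    {"hadoop.security.authentication": "kerberos"},  # props_value_check
    ["hadoop.security.auth_to_local"],               # props_empty_check
    None,                                            # props_read_check
)

# Read the live config from disk and diff it against the expectations.
security_params = get_params_from_filesystem(
    '/etc/hadoop/conf', {'core-site.xml': FILE_TYPE_XML})
issues = validate_security_config_properties(security_params, expectations)
print(issues)  # empty dict when every expectation passes
```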
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index 7ba1f96..fa451f4 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -76,64 +76,6 @@
     env.set_params(status_params)
 
     check_process_status(status_params.nfsgateway_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index 7f282b3..838510c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -26,4 +26,4 @@
 
 nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
 retryAble = default("/commandParams/command_retry_enabled", False)
-script_https_protocol = Script.get_force_https_protocol()
\ No newline at end of file
+script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f0566d7..e88dbdd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -70,6 +70,8 @@
 # are started using different commands.
 desired_namenode_role = default("/commandParams/desired_namenode_role", None)
 
+command_timeout = default("/commandParams/command_timeout", 900)
+
 # get the correct version to use for checking stack features
 version_for_stack_feature_checks = get_stack_feature_version(config)
 
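The new command_timeout parameter uses the same default() helper seen throughout these params scripts: resolve a key from the command JSON by path, with a fallback when the key is absent. A minimal sketch (the int cast is an assumption, since command params often arrive as strings):

```python
from resource_management.libraries.functions.default import default

# Resolve /commandParams/command_timeout from the command JSON,
# falling back to 900 seconds when the key is missing.
command_timeout = int(default("/commandParams/command_timeout", 900))
```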
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index 0f1f438..1408468 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -74,66 +74,6 @@
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index d8d0515..d861ba9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -38,7 +38,10 @@
 from ambari_commons.inet_utils import ensure_ssl_using_protocol
 from zkfc_slave import ZkfcSlaveDefault
 
-ensure_ssl_using_protocol(Script.get_force_https_protocol())
+ensure_ssl_using_protocol(
+  Script.get_force_https_protocol_name(),
+  Script.get_ca_cert_file_path()
+)
 
 def safe_zkfc_op(action, env):
   """
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index be0d2ed..ca5f605 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -119,49 +119,6 @@
     env.set_params(status_params)
     check_process_status(status_params.zkfc_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def disable_security(self, env):
     import params
 
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/input.config-hdfs.json.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/input.config-hdfs.json.j2
new file mode 100644
index 0000000..ed7abf6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/input.config-hdfs.json.j2
@@ -0,0 +1,216 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hdfs_datanode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
+    },
+    {
+      "type":"hdfs_namenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
+    },
+    {
+      "type":"hdfs_journalnode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
+    },
+    {
+      "type":"hdfs_secondarynamenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
+    },
+    {
+      "type":"hdfs_zkfc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
+    },
+    {
+      "type":"hdfs_nfs3",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
+    },
+    {
+      "type":"hdfs_audit",
+      "rowtype":"audit",
+      "is_enabled":"true",
+      "add_fields":{
+        "logType":"HDFSAudit",
+        "enforcer":"hadoop-acl",
+        "repoType":"1",
+        "repo":"hdfs"
+      },
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_datanode",
+            "hdfs_journalnode",
+            "hdfs_secondarynamenode",
+            "hdfs_namenode",
+            "hdfs_zkfc",
+            "hdfs_nfs3"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "evtTime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"keyvalue",
+      "sort_order":1,
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "source_field":"log_message",
+      "value_split":"=",
+      "field_split":"\t",
+      "post_map_values":{
+        "src":{
+          "map_fieldname":{
+            "new_fieldname":"resource"
+          }
+        },
+        "ip":{
+          "map_fieldname":{
+            "new_fieldname":"cliIP"
+          }
+        },
+        "allowed":[
+          {
+            "map_fieldvalue":{
+              "pre_value":"true",
+              "post_value":"1"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"false",
+              "post_value":"0"
+            }
+          },
+          {
+            "map_fieldname":{
+              "new_fieldname":"result"
+            }
+          }
+        ],
+        "cmd":{
+          "map_fieldname":{
+            "new_fieldname":"action"
+          }
+        },
+        "proto":{
+          "map_fieldname":{
+            "new_fieldname":"cliType"
+          }
+        },
+        "callerContext":{
+          "map_fieldname":{
+            "new_fieldname":"req_caller_id"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "sort_order":2,
+      "source_field":"ugi",
+      "remove_source_field":"false",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
+      "post_map_values":{
+        "user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "x_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "p_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "k_user":{
+          "map_fieldname":{
+            "new_fieldname":"proxyUsers"
+          }
+        },
+        "p_authType":{
+          "map_fieldname":{
+            "new_fieldname":"authType"
+          }
+        },
+        "k_authType":{
+          "map_fieldname":{
+            "new_fieldname":"proxyAuthType"
+          }
+        }
+      }
+    }
+  ]
+}
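The grok message_pattern in the service-log filter above maps each log4j line into named fields. A rough Python-regex analogue (grok is richer than re, so this only approximates the %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n layout; the sample line is made up):

```python
import re

line = ("2023-05-01 12:00:01,123 INFO  namenode.FSNamesystem "
        "(FSNamesystem.java:loadFSImage(123)) - Loaded image")

pattern = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+"
    r"(?P<logger_name>[\w.$]+)\s+"
    r"\((?P<file>[\w.]+):(?P<method>\w+)\((?P<line_number>\d+)\)\)\s+-\s+"
    r"(?P<log_message>.*)$")

print(pattern.match(line).groupdict())
# {'logtime': '2023-05-01 12:00:01,123', 'level': 'INFO', ...}
```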
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
index e447c52..e292e6e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
@@ -269,143 +269,143 @@
     <display-name>hadoop-env template</display-name>
     <description>This is the jinja template for hadoop-env.sh file</description>
     <value>
-      # Set Hadoop-specific environment variables here.
+# Set Hadoop-specific environment variables here.
 
-      # The only required environment variable is JAVA_HOME.  All others are
-      # optional.  When running a distributed configuration it is best to
-      # set JAVA_HOME in this file, so that it is correctly defined on
-      # remote nodes.
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
 
-      # The java implementation to use.  Required.
-      export JAVA_HOME={{java_home}}
-      export HADOOP_HOME_WARN_SUPPRESS=1
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
 
-      # Hadoop home directory
-      export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
 
-      # Hadoop Configuration Directory
-      #TODO: if env var set that can cause problems
-      export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
 
 
-      # Path to jsvc required by secure datanode
-      export JSVC_HOME={{jsvc_path}}
+# Path to jsvc required by secure datanode
+export JSVC_HOME={{jsvc_path}}
 
 
-      # The maximum amount of heap to use, in MB. Default is 1000.
-      if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
-      if [ "$HADOOP_HEAPSIZE" = "" ]; then
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
-      else
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
+# The maximum amount of heap to use, in MB. Default is 1000.
+if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
+if [ "$HADOOP_HEAPSIZE" = "" ]; then
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
+else
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
 
 
-      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 
-      # Extra Java runtime options.  Empty by default.
-      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
-      # Command specific options appended to HADOOP_OPTS when specified
+# Command specific options appended to HADOOP_OPTS when specified
 
-      {% if java_version &lt; 8 %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-      {% else %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-      {% endif %}
-      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
-      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-      HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-      # On secure datanodes, user to run the datanode as after dropping privileges
-      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
 
-      # Extra ssh options.  Empty by default.
-      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
 
-      # Where log files are stored.  $HADOOP_HOME/logs by default.
-      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
 
-      # History server logs
-      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
 
-      # Where log files are stored in the secure data environment.
-      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
 
-      # host:path where hadoop code should be rsync'd from.  Unset by default.
-      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
 
-      # Seconds to sleep between slave commands.  Unset by default.  This
-      # can be useful in large clusters, where, e.g., slave rsyncs can
-      # otherwise arrive faster than the master can service them.
-      # export HADOOP_SLAVE_SLEEP=0.1
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
 
-      # The directory where pid files are stored. /tmp by default.
-      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-      # History server pid
-      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
 
-      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
 
-      # A string representing this instance of hadoop. $USER by default.
-      export HADOOP_IDENT_STRING=$USER
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
 
-      # The scheduling priority for daemon processes.  See 'man nice'.
+# The scheduling priority for daemon processes.  See 'man nice'.
 
-      # export HADOOP_NICENESS=10
+# export HADOOP_NICENESS=10
 
-      # Add database libraries
-      JAVA_JDBC_LIBS=""
-      if [ -d "/usr/share/java" ]; then
-      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-      do
-      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-      done
-      fi
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+do
+JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+fi
 
-      # Add libraries required by nodemanager
-      MAPREDUCE_LIBS={{mapreduce_libs_path}}
+# Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
 
-      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
-      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
 
-      if [ -d "/usr/lib/tez" ]; then
-      export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
-      fi
+if [ -d "/usr/lib/tez" ]; then
+export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
 
-      # Setting path to hdfs command line
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
-      #Mostly required for hadoop 2.0
-      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64
 
-      {% if is_datanode_max_locked_memory_set %}
-      # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
-      # Makes sense to fix only when runing DN as root
-      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-      ulimit -l {{datanode_max_locked_memory}}
-      fi
-      {% endif %}
-      # Enable ACLs on zookeper znodes if required
-      {% if hadoop_zkfc_opts is defined %}
-      export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS"
-      {% endif %}
+{% if is_datanode_max_locked_memory_set %}
+# Temporary fix: ulimit from conf files is not picked up without a full relogin.
+# Makes sense to apply only when running the DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
+# Enable ACLs on ZooKeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
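The de-indentation above is functional, not cosmetic: the <value> body is rendered verbatim into hadoop-env.sh, so every leading space in the template would survive into the generated shell script. A minimal jinja2 sketch of the same {% if %} conditional (jinja2 syntax as used by these templates; the values are made up):

```python
from jinja2 import Template

template = Template(
    "export JAVA_HOME={{ java_home }}\n"
    "{% if java_version < 8 %}"
    "export HADOOP_CLIENT_OPTS=\"-XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n"
    "{% endif %}")

# With java_version >= 8 the JDK7-only MaxPermSize flag is omitted entirely.
print(template.render(java_home="/usr/jdk64/jdk1.8.0_112", java_version=8))
```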
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
index 4aadb83..84ea231 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
@@ -86,6 +86,8 @@
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
index 5f6ec3f..f529494 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
@@ -26,197 +26,197 @@
     <display-name>hdfs-log4j template</display-name>
     <description>Custom log4j.properties</description>
     <value>
-      #
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements.  See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership.  The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License.  You may obtain a copy of the License at
-      #
-      #  http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing,
-      # software distributed under the License is distributed on an
-      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-      # KIND, either express or implied.  See the License for the
-      # specific language governing permissions and limitations
-      # under the License.
-      #
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
 
 
-      # Define some default values that can be overridden by system properties
-      # To change daemon root logger use hadoop_root_logger in hadoop-env
-      hadoop.root.logger=INFO,console
-      hadoop.log.dir=.
-      hadoop.log.file=hadoop.log
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
 
 
-      # Define the root logger to the system property "hadoop.root.logger".
-      log4j.rootLogger=${hadoop.root.logger}, EventCounter
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
 
-      # Logging Threshold
-      log4j.threshhold=ALL
+# Logging Threshold
+log4j.threshhold=ALL
 
-      #
-      # Daily Rolling File Appender
-      #
+#
+# Daily Rolling File Appender
+#
 
-      log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
 
-      # Rollver at midnight
-      log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
 
-      # 30-day backup
-      #log4j.appender.DRFA.MaxBackupIndex=30
-      log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
 
-      # Pattern format: Date LogLevel LoggerName LogMessage
-      log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      # Debugging Pattern format
-      #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
 
 
-      #
-      # console
-      # Add "console" to rootlogger above if you want to use this
-      #
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
 
-      log4j.appender.console=org.apache.log4j.ConsoleAppender
-      log4j.appender.console.target=System.err
-      log4j.appender.console.layout=org.apache.log4j.PatternLayout
-      log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
 
-      #
-      # TaskLog Appender
-      #
+#
+# TaskLog Appender
+#
 
-      #Default values
-      hadoop.tasklog.taskid=null
-      hadoop.tasklog.iscleanup=false
-      hadoop.tasklog.noKeepSplits=4
-      hadoop.tasklog.totalLogFileSize=100
-      hadoop.tasklog.purgeLogSplits=true
-      hadoop.tasklog.logsRetainHours=12
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
 
-      log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-      log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-      log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-      log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
-      log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
-      #
-      #Security audit appender
-      #
-      hadoop.security.logger=INFO,console
-      hadoop.security.log.maxfilesize=256MB
-      hadoop.security.log.maxbackupindex=20
-      log4j.category.SecurityLogger=${hadoop.security.logger}
-      hadoop.security.log.file=SecurityAuth.audit
-      log4j.additivity.SecurityLogger=false
-      log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.additivity.SecurityLogger=false
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 
-      log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-      log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
 
-      #
-      # hdfs audit logging
-      #
-      hdfs.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-      log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-      log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-      log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
 
-      #
-      # NameNode metrics logging.
-      # The default is to retain two namenode-metrics.log files up to 64MB each.
-      #
-      namenode.metrics.logger=INFO,NullAppender
-      log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-      log4j.additivity.NameNodeMetricsLog=false
-      log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-      log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-      log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-      log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
 
-      #
-      # mapred audit logging
-      #
-      mapred.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-      log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-      log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-      log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
 
-      #
-      # Rolling File Appender
-      #
+#
+# Rolling File Appender
+#
 
-      log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
 
-      # Logfile size and and 30-day backups
-      log4j.appender.RFA.MaxFileSize=256MB
-      log4j.appender.RFA.MaxBackupIndex=10
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
 
-      log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
 
 
-      # Custom Logging levels
+# Custom Logging levels
 
-      hadoop.metrics.log.level=INFO
-      #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-      log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
 
-      # Jets3t library
-      log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
 
-      #
-      # Null Appender
-      # Trap security logger on the hadoop client side
-      #
-      log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
 
-      #
-      # Event Counter Appender
-      # Sends counts of logging messages at different severity levels to Hadoop Metrics.
-      #
-      log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
-      # Removes "deprecated" messages
-      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
 
-      #
-      # HDFS block state change log from block manager
-      #
-      # Uncomment the following to suppress normal block state change
-      # messages from BlockManager in NameNode.
-      #log4j.logger.BlockStateChange=WARN
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
index 967c974..3e1a7ae 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
@@ -35,7 +35,7 @@
           <dependencies>
             <dependency>
               <name>HDFS/ZKFC</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
@@ -48,7 +48,7 @@
             </dependency>
             <dependency>
               <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
@@ -61,7 +61,7 @@
             </dependency>
             <dependency>
               <name>HDFS/JOURNALNODE</name>
-              <scope>host</scope>
+              <scope>cluster</scope>
               <auto-deploy>
                 <enabled>false</enabled>
               </auto-deploy>
@@ -199,6 +199,10 @@
             <scriptType>PYTHON</scriptType>
             <timeout>1200</timeout>
           </commandScript>
+          <bulkCommands>
+            <displayName>JournalNodes</displayName>
+            <masterComponent>NAMENODE</masterComponent>
+          </bulkCommands>
           <logs>
             <log>
               <logId>hdfs_journalnode</logId>
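Editor's note on the host-to-cluster scope change above: in Ambari stack metainfo, a host-scoped dependency must be co-located on the same host as the dependent component, while a cluster-scoped one only needs to exist somewhere in the cluster. A rough illustrative sketch of that distinction (names and structure are assumptions, not Ambari's actual validator):

```python
def dependency_satisfied(dep, components_on_host, components_in_cluster):
    # 'host' scope: the dependency must run on the same host as the
    # dependent component; 'cluster' scope: any host in the cluster will do.
    if dep["scope"] == "host":
        return dep["name"] in components_on_host
    return dep["name"] in components_in_cluster

# e.g. after this change, NAMENODE no longer requires a co-located ZooKeeper:
dep = {"name": "ZOOKEEPER/ZOOKEEPER_SERVER", "scope": "cluster"}
print(dependency_satisfied(dep, set(), {"ZOOKEEPER/ZOOKEEPER_SERVER"}))  # True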
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
index 130c021..78a8f4b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/datanode.py
@@ -98,64 +98,6 @@
     # ensure the DataNode has started and rejoined the cluster
     datanode_upgrade.post_upgrade_check(hdfs_binary)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
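Editor's note: the security_status removal above is repeated for every HDFS and Hive component in this patch, and each instance drops the same boilerplate: build expectations for core-site/hdfs-site, validate the on-disk XML, then prove the keytab works with a cached kinit. Condensed to its skeleton (a sketch built from the helpers the removed code imports, not a drop-in replacement; the per-component keytab and principal keys vary):

```python
from resource_management.libraries.functions.security_commons import (
    cached_kinit_executor, get_params_from_filesystem,
    validate_security_config_properties, FILE_TYPE_XML)

def security_status_skeleton(component, status_params, expectations,
                             keytab_key, principal_key):
    """Shape shared by every removed security_status method."""
    security_params = get_params_from_filesystem(
        status_params.hadoop_conf_dir,
        {'core-site.xml': FILE_TYPE_XML, 'hdfs-site.xml': FILE_TYPE_XML})
    issues = validate_security_config_properties(security_params, expectations)
    if issues:
        component.put_structured_out({"securityState": "UNSECURED"})
        return
    try:
        cached_kinit_executor(status_params.kinit_path_local,
                              status_params.hdfs_user,
                              security_params['hdfs-site'][keytab_key],
                              security_params['hdfs-site'][principal_key],
                              status_params.hostname,
                              status_params.tmp_dir)
        component.put_structured_out({"securityState": "SECURED_KERBEROS"})
    except Exception as e:
        component.put_structured_out({"securityState": "ERROR",
                                      "securityStateErrorInfo": str(e)})
```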
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
index 4dabdbc..51acc9e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_client.py
@@ -66,51 +66,6 @@
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations ={}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues: # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                       status_params.hdfs_user,
-                       status_params.hdfs_user_keytab,
-                       status_params.hdfs_user_principal,
-                       status_params.hostname,
-                       status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class HdfsClientWindows(HdfsClient):
   def install(self, env):
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
index 9448fa6..7fd8d70 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/journalnode.py
@@ -105,63 +105,6 @@
     env.set_params(status_params)
     check_process_status(status_params.journalnode_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(hdfs_site_expectations)
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index 602dad7..a42ca79 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -216,63 +216,6 @@
             try_sleep=10
     )
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                         'dfs.namenode.keytab.file',
-                         'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'hdfs-site' not in security_params
-               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
-               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def rebalancehdfs(self, env):
     import params
     env.set_params(params)
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
index 7ba1f96..602c179 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/nfsgateway.py
@@ -77,64 +77,6 @@
 
     check_process_status(status_params.nfsgateway_pid_file)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
index 0f1f438..030a470 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/snamenode.py
@@ -75,66 +75,6 @@
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
 
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-      
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
index 19a78c7..fa948ca 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/zkfc_slave.py
@@ -119,49 +119,6 @@
     env.set_params(status_params)
     check_process_status(status_params.zkfc_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def disable_security(self, env):
     import params
 
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
index ad668a2..b2c364c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -117,7 +117,7 @@
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
-    <deleted>true</true>
+    <deleted>true</deleted>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -221,6 +221,7 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
+ export JAVA_HOME={{java64_home}}
  if [ "$SERVICE" = "cli" ]; then
    if [ -z "$DEBUG" ]; then
      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-logsearch-conf.xml
deleted file mode 100644
index c1b971c..0000000
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-logsearch-conf.xml
+++ /dev/null
@@ -1,117 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Hive</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>HIVE_METASTORE:hive_metastore;HIVE_SERVER:hive_hiveserver2;WEBHCAT_SERVER:webhcat_server</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hive_hiveserver2",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hiveserver2.log"
-    },
-    {
-      "type":"hive_metastore",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hivemetastore.log"
-    },
-    {
-      "type": "webhcat_server",
-      "rowntype":"service",
-      "path":"{{default('configurations/hive-env/hcat_log_dir', '/var/log/webhcat')}}/webhcat.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hive_hiveserver2",
-            "hive_metastore"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]:%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "webhcat_server"
-          ]
-         }
-       },
-      "log4j_format":" %-5p | %d{DATE} | %c | %m%n",
-      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime})",
-      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime}%{CUSTOM_SEPARATOR}%{JAVACLASS:file}%{CUSTOM_SEPARATOR}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"dd MMM yyyy HH:mm:ss,SSS"
-          }
-         },
-        "level":{
-           "map_fieldvalue":{
-             "pre_value":"WARNING",
-             "post_value":"WARN"
-            }
-        }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
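Editor's note on the deleted Logfeeder template above: it carried the grok patterns for HiveServer2/Metastore and WebHCat logs. Its message_pattern captures timestamp, level, thread, logger, source location, and message from log4j's (%F:%M(%L)) layout. A rough Python re analogue of the hiveserver2 pattern, to make the capture groups concrete (an approximation; Logfeeder actually uses grok, and the sample line is invented):

```python
import re

HIVE_LINE = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+\[(?P<thread_name>[^\]]+)\]:\s+"
    r"(?P<logger_name>\S+)\s+\((?P<file>[^:]+):(?P<method>[^(]+)"
    r"\((?P<line_number>\d+)\)\)\s+-\s+(?P<log_message>.*)$")

sample = ("2017-05-01 12:00:00,123 INFO  [main]: ql.Driver "
          "(Driver.java:run(123)) - query completed")
m = HIVE_LINE.match(sample)
print(m.group("level"), m.group("logger_name"))  # INFO ql.Driver
```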
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 095be3f..98d1899 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -47,10 +47,10 @@
 
 SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
 
-HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.zk.sm.principal}}'
+HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.daemon.service.principal}}'
 HIVE_PRINCIPAL_DEFAULT = 'default.hive.principal'
 
-HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.zk.sm.keytab.file}}'
+HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.daemon.keytab.file}}'
 HIVE_PRINCIPAL_KEYTAB_DEFAULT = 'default.hive.keytab'
 
 HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
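Editor's note on the key swap above (hive.llap.zk.sm.* to hive.llap.daemon.*): alert scripts look configuration values up by these exact '{{config-type/property}}' placeholder strings at runtime, so a stale key silently falls back to its default. A minimal sketch of that lookup convention (assumed shape; the real alert framework passes a richer context to execute()):

```python
def lookup(configurations, key, default_value):
    # Alert scripts receive a dict keyed by the literal
    # '{{config-type/property}}' tokens they declared; missing keys
    # fall back to defaults such as 'default.hive.principal'.
    return configurations.get(key, default_value)

configs = {'{{hive-interactive-site/hive.llap.daemon.service.principal}}':
           'hive/_HOST@EXAMPLE.COM'}
principal = lookup(configs,
                   '{{hive-interactive-site/hive.llap.daemon.service.principal}}',
                   'default.hive.principal')
```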
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index b7b04a2..959e111 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -300,6 +300,15 @@
        content=StaticFile('startMetastore.sh')
   )
 
+  # Pre-create the Hive scratch dir when it resolves to a path under /tmp
+  # (but never /tmp itself), so a cleaned-up /tmp does not break Hive.
+  if not is_empty(params.hive_exec_scratchdir):
+    dir_path_str = urlparse(params.hive_exec_scratchdir).path
+    path_components = dir_path_str.split("/")
+    if dir_path_str.startswith("/tmp") and len(path_components) > 2:
+      Directory(params.hive_exec_scratchdir,
+                owner = params.hive_user,
+                create_parents = True,
+                mode = 0777)
+
 def create_metastore_schema():
   import params
 
@@ -345,14 +354,14 @@
             mode=mode_identified_for_dir
   )
 
-  XmlConfig("mapred-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=mode_identified_for_file)
-
+  if 'mapred-site' in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=component_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=mode_identified_for_file)
 
   File(format("{component_conf_dir}/hive-default.xml.template"),
        owner=params.hive_user,
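Editor's note on the /tmp guard added above: the component count is what keeps /tmp itself from being created world-writable, since only proper subdirectories pass. A sketch of the check in isolation (assumes Python 2, matching the agent scripts):

```python
from urlparse import urlparse  # Python 2, as used by the Ambari agent scripts

def should_precreate(scratchdir_uri):
    path = urlparse(scratchdir_uri).path
    parts = path.split("/")
    # '/tmp'      -> ['', 'tmp']          (len 2) -> rejected
    # '/tmp/hive' -> ['', 'tmp', 'hive']  (len 3) -> accepted
    return path.startswith("/tmp") and len(parts) > 2

print(should_precreate("hdfs://nn:8020/tmp/hive"))  # True
print(should_precreate("/tmp"))                     # False
```

Note two quirks inherited from the hunk above: a trailing slash ('/tmp/') splits to ['', 'tmp', ''] and still passes, and the prefix test also matches siblings like '/tmpdata/x'.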
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
index 8b69e45..99eb8b5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
@@ -35,6 +35,7 @@
 from resource_management.libraries.functions.security_commons import validate_security_config_properties
 from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.core.resources.system import File
+from setup_ranger_hive import setup_ranger_hive_metastore_service
 
 from hive import create_metastore_schema, hive, jdbc_connector
 from hive_service import hive_service
@@ -61,6 +62,8 @@
 
     hive_service('metastore', action='start', upgrade_type=upgrade_type)
 
+    # For clusters deployed in a cloud environment: create the Ranger Hive service in Ranger Admin.
+    setup_ranger_hive_metastore_service()
 
   def stop(self, env, upgrade_type=None):
     import params
@@ -113,58 +116,6 @@
             check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
       self.upgrade_schema(env)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.metastore.kerberos.keytab.file",
-                           "hive.metastore.kerberos.principal"]
-
-      props_read_check = ["hive.metastore.kerberos.keytab.file"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
-            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
   def upgrade_schema(self, env):
     """
     Executes the schema upgrade binary.  This is its own function because it could
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
index f6251e7..7c3a805 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
@@ -135,67 +135,6 @@
       if resource_created:
         params.HdfsResource(None, action="execute")
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                           "hive.server2.authentication.kerberos.principal",
-                           "hive.server2.authentication.spnego.principal",
-                           "hive.server2.authentication.spnego.keytab"]
-
-      props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                          "hive.server2.authentication.spnego.keytab"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
-            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def _base_node(self, path):
     if not path.startswith('/'):
       path = '/' + path
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index 46864c0..0504d18 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -152,67 +152,6 @@
       # Recursively check all existing gmetad pid files
       check_process_status(status_params.hive_interactive_pid)
 
-    def security_status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-        props_value_check = {"hive.server2.authentication": "KERBEROS",
-                             "hive.metastore.sasl.enabled": "true",
-                             "hive.security.authorization.enabled": "true"}
-        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                             "hive.server2.authentication.kerberos.principal",
-                             "hive.server2.authentication.spnego.principal",
-                             "hive.server2.authentication.spnego.keytab"]
-
-        props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                            "hive.server2.authentication.spnego.keytab"]
-        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                             props_read_check)
-
-        hive_expectations ={}
-        hive_expectations.update(hive_site_props)
-
-        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
-                                                     {'hive-site.xml': FILE_TYPE_XML})
-        result_issues = validate_security_config_properties(security_params, hive_expectations)
-        if not result_issues: # If all validations passed successfully
-          try:
-            # Double check the dict before calling execute
-            if 'hive-site' not in security_params \
-              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-              self.put_structured_out({"securityState": "UNSECURED"})
-              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-              return
-
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
     def restart_llap(self, env):
       """
       Custom command to Restart LLAP
@@ -426,9 +365,6 @@
       hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
       Execute(hive_interactive_kinit_cmd, user=params.hive_user)
 
-      llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
-      Execute(llap_kinit_cmd, user=params.hive_user)
-
     """
     Get llap app status data for LLAP Tech Preview code base.
     """
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 84bac38..c1128a5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -534,12 +534,21 @@
 ########################################################
 ############# AMS related params #####################
 ########################################################
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+set_instanceId = "false"
+
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -557,6 +566,8 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 ########################################################
 ############# Atlas related params #####################
 ########################################################
@@ -676,8 +687,8 @@
   llap_extra_slider_opts = default('/configurations/hive-interactive-env/llap_extra_slider_opts', "")
   hive_llap_principal = None
   if security_enabled:
-    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
-    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
+    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.daemon.keytab.file']
+    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.daemon.service.principal']).replace('_HOST',hostname.lower())
   pass
 
 if len(hive_server_hosts) == 0 and len(hive_server_interactive_hosts) > 0:
@@ -824,3 +835,10 @@
     xa_audit_db_is_enabled = False
 
 # ranger hive plugin section end
+
+# The property below is used for clusters deployed in a cloud environment, to create the
+# Ranger Hive service in Ranger Admin; it must be added as a custom property.
+ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-properties/ranger.service.config.param.enable.hive.metastore.lookup', False)
+
+if security_enabled:
+  hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
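Editor's note on the AMS block above: it introduces a precedence rule in which externally managed collectors (metrics_collector_external_hosts/port in cluster-env) win over the cluster-managed ones discovered through clusterHostInfo, with set_instanceId flagging the sink configs accordingly. The same precedence, distilled over an assumed minimal config shape:

```python
def resolve_collectors(config):
    cluster_env = config['configurations'].get('cluster-env', {})
    if 'metrics_collector_external_hosts' in cluster_env:
        # External AMS: hosts come from cluster-env and instanceId is required.
        return cluster_env['metrics_collector_external_hosts'], "true"
    hosts = config.get('clusterHostInfo', {}).get('metrics_collector_hosts', [])
    return ",".join(hosts), "false"

hosts, set_instance_id = resolve_collectors(
    {'configurations': {'cluster-env': {}},
     'clusterHostInfo': {'metrics_collector_hosts': ['c6401.ambari.apache.org']}})
print(hosts, set_instance_id)  # c6401.ambari.apache.org false
```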
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
index 80bd7c8..379b4ac 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -18,6 +18,9 @@
 
 """
 from resource_management.core.logger import Logger
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
 
 def setup_ranger_hive(upgrade_type = None):
   import params
@@ -96,3 +99,58 @@
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger Hive plugin is not enabled')
+
+def setup_ranger_hive_metastore_service():
+  """
+  Creates the Ranger Hive service in the Ranger Admin of the same cluster, for clusters deployed in a cloud environment.
+  """
+  import params
+
+  if params.has_ranger_admin and params.ranger_hive_metastore_lookup:
+
+    repo_name = str(params.config['clusterName']) + '_hive'
+    repo_name_value = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
+    if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+      repo_name = repo_name_value
+
+    hive_ranger_plugin_config = {
+      'username': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
+      'jdbc.driverClassName': params.config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName'],
+      'jdbc.url': 'none',
+      'commonNameForCertificate': params.config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate'],
+      'ambari.service.check.user': params.config['configurations']['ranger-hive-plugin-properties']['policy_user']
+    }
+
+    if params.security_enabled:
+      hive_ranger_plugin_config['policy.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['tag.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = params.hive_user
+
+    custom_ranger_service_config = generate_ranger_service_config(params.config['configurations']['ranger-hive-plugin-properties'])
+    if len(custom_ranger_service_config) > 0:
+      hive_ranger_plugin_config.update(custom_ranger_service_config)
+
+    hive_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hive_ranger_plugin_config,
+      'description': 'Hive service',
+      'name': repo_name,
+      'type': 'hive'
+    }
+
+    ranger_admin_obj = RangeradminV2(url = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url'], skip_if_rangeradmin_down = not params.retryAble)
+    ranger_admin_obj.create_ranger_repository(
+      component = 'hive',
+      repo_name = repo_name,
+      repo_properties = hive_ranger_plugin_repo,
+      ambari_ranger_admin = params.config['configurations']['ranger-env']['ranger_admin_username'],
+      ambari_ranger_password = params.config['configurations']['ranger-env']['ranger_admin_password'],
+      admin_uname = params.config['configurations']['ranger-env']['admin_username'],
+      admin_password = params.config['configurations']['ranger-env']['admin_password'],
+      policy_user = params.config['configurations']['ranger-hive-plugin-properties']['policy_user'],
+      is_security_enabled = params.security_enabled,
+      is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+      component_user = params.hive_user,
+      component_user_principal = params.hive_metastore_principal_with_host if params.security_enabled else None,
+      component_user_keytab = params.hive_metastore_keytab_path if params.security_enabled else None)
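Editor's note: setup_ranger_hive_metastore_service above resolves the Ranger repository name before anything else. The configured ranger.plugin.hive.service.name wins unless it is empty or still the unrendered '{{repo_name}}' placeholder, in which case '<clusterName>_hive' is used. Distilled (a standalone sketch of the same fallback logic):

```python
def resolve_repo_name(cluster_name, configured_value):
    # Fall back to '<cluster>_hive' when the service name is unset or the
    # template placeholder was never substituted.
    if configured_value and configured_value != "{{repo_name}}":
        return configured_value
    return str(cluster_name) + "_hive"

print(resolve_repo_name("prod", "{{repo_name}}"))  # prod_hive
print(resolve_repo_name("prod", "prod_hive_svc"))  # prod_hive_svc
```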
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
index 93fa411..18e11ab 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
@@ -84,73 +84,6 @@
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hive-webhcat", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations ={}
-      expectations.update(
-        build_expectations(
-          'webhcat-site',
-          {
-            "templeton.kerberos.secret": "secret"
-          },
-          [
-            "templeton.kerberos.keytab",
-            "templeton.kerberos.principal"
-          ],
-          [
-            "templeton.kerberos.keytab"
-          ]
-        )
-      )
-      expectations.update(
-        build_expectations(
-          'hive-site',
-          {
-            "hive.server2.authentication": "KERBEROS",
-            "hive.metastore.sasl.enabled": "true",
-            "hive.security.authorization.enabled": "true"
-          },
-          None,
-          None
-        )
-      )
-
-      security_params = {}
-      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
-                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'webhcat-site' not in security_params \
-            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
-            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.webhcat_user,
-                                security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                security_params['webhcat-site']['templeton.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.hcat_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 82f71c5..3093e56 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -51,6 +51,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index f1bfa03..59a7c1b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -51,5 +51,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
index d63dfb7..69f6071 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -50,5 +50,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index 4c9c981..c08a498 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -50,5 +50,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/input.config-hive.json.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/input.config-hive.json.j2
new file mode 100644
index 0000000..8697cf5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/input.config-hive.json.j2
@@ -0,0 +1,85 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hive_hiveserver2",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hiveserver2.log"
+    },
+    {
+      "type":"hive_metastore",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hivemetastore.log"
+    },
+    {
+      "type": "webhcat_server",
+      "rowntype":"service",
+      "path":"{{default('configurations/hive-env/hcat_log_dir', '/var/log/webhcat')}}/webhcat.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hive_hiveserver2",
+            "hive_metastore"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]:%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "webhcat_server"
+          ]
+        }
+      },
+      "log4j_format":" %-5p | %d{DATE} | %c | %m%n",
+      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime})",
+      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime}%{CUSTOM_SEPARATOR}%{JAVACLASS:file}%{CUSTOM_SEPARATOR}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"dd MMM yyyy HH:mm:ss,SSS"
+          }
+        },
+        "level":{
+          "map_fieldvalue":{
+            "pre_value":"WARNING",
+            "post_value":"WARN"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
index 964abdb..1244979 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
@@ -26,31 +26,31 @@
     <display-name>hcat-env template</display-name>
     <description>This is the jinja template for hcat-env.sh file</description>
     <value>
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements. See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership. The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License. You may obtain a copy of the License at
-      #
-      # http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing, software
-      # distributed under the License is distributed on an "AS IS" BASIS,
-      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      # See the License for the specific language governing permissions and
-      # limitations under the License.
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-      JAVA_HOME={{java64_home}}
-      HCAT_PID_DIR={{hcat_pid_dir}}/
-      HCAT_LOG_DIR={{hcat_log_dir}}/
-      HCAT_CONF_DIR={{hcat_conf_dir}}
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-      #DBROOT is the path where the connector jars are downloaded
-      DBROOT={{hcat_dbroot}}
-      USER={{webhcat_user}}
-      METASTORE_PORT={{hive_metastore_port}}
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{webhcat_user}}
+METASTORE_PORT={{hive_metastore_port}}
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
index 872120c..54a62e2 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
@@ -86,7 +86,7 @@
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
-    <deleted>true</true>
+    <deleted>true</deleted>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
@@ -437,56 +437,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+if [ -z "$DEBUG" ]; then
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+else
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm started by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
index e2048a2..940fc79 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
@@ -281,48 +281,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+if [ -z "$DEBUG" ]; then
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+else
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the jvm started by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
-
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
index 6d9098f..4225f19 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/metainfo.xml
@@ -424,6 +424,9 @@
         <osSpecific>
           <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
+            <!--
+            TODO AMBARI-20753
+            Re-add after Hive has all of its packages using the correct RPM name.
             <package>
               <name>hive-${stack_version}</name>
             </package>
@@ -438,11 +441,12 @@
               <condition>should_install_hive_atlas</condition>
             </package>
             <package>
-              <name>hive2-${stack_version}</name>
-            </package>
-            <package>
               <name>tez-hive2-${stack_version}</name>
             </package>
+            -->
+            <package>
+              <name>hive2-${stack_version}</name>
+            </package>
           </packages>
         </osSpecific>
         <osSpecific>
@@ -491,7 +495,10 @@
         <service>HDFS</service>
         <service>YARN</service>
         <service>TEZ</service>
+        <!-- TODO AMBARI-20753
+        Re-add after Pig service is being packaged.
         <service>PIG</service>
+        -->
         <service>SLIDER</service>
       </requiredServices>
 
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/alerts/alert_llap_app_status.py
index 095be3f..98d1899 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/alerts/alert_llap_app_status.py
@@ -47,10 +47,10 @@
 
 SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
 
-HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.zk.sm.principal}}'
+HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.daemon.service.principal}}'
 HIVE_PRINCIPAL_DEFAULT = 'default.hive.principal'
 
-HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.zk.sm.keytab.file}}'
+HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.daemon.keytab.file}}'
 HIVE_PRINCIPAL_KEYTAB_DEFAULT = 'default.hive.keytab'
 
 HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
index 8b69e45..99eb8b5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_metastore.py
@@ -35,6 +35,7 @@
 from resource_management.libraries.functions.security_commons import validate_security_config_properties
 from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
 from resource_management.core.resources.system import File
+from setup_ranger_hive import setup_ranger_hive_metastore_service
 
 from hive import create_metastore_schema, hive, jdbc_connector
 from hive_service import hive_service
@@ -61,6 +62,8 @@
 
     hive_service('metastore', action='start', upgrade_type=upgrade_type)
 
+    # below function call is used for clusters deployed in a cloud env to create the ranger hive service in ranger admin.
+    setup_ranger_hive_metastore_service()
 
   def stop(self, env, upgrade_type=None):
     import params
@@ -113,58 +116,6 @@
             check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
       self.upgrade_schema(env)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.metastore.kerberos.keytab.file",
-                           "hive.metastore.kerberos.principal"]
-
-      props_read_check = ["hive.metastore.kerberos.keytab.file"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
-            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
   def upgrade_schema(self, env):
     """
     Executes the schema upgrade binary.  This is its own function because it could
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
index f6251e7..7c3a805 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server.py
@@ -135,67 +135,6 @@
       if resource_created:
         params.HdfsResource(None, action="execute")
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                           "hive.server2.authentication.kerberos.principal",
-                           "hive.server2.authentication.spnego.principal",
-                           "hive.server2.authentication.spnego.keytab"]
-
-      props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                          "hive.server2.authentication.spnego.keytab"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
-            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def _base_node(self, path):
     if not path.startswith('/'):
       path = '/' + path
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
index 46864c0..0504d18 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/hive_server_interactive.py
@@ -152,67 +152,6 @@
       # Recursively check all existing gmetad pid files
       check_process_status(status_params.hive_interactive_pid)
 
-    def security_status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-        props_value_check = {"hive.server2.authentication": "KERBEROS",
-                             "hive.metastore.sasl.enabled": "true",
-                             "hive.security.authorization.enabled": "true"}
-        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                             "hive.server2.authentication.kerberos.principal",
-                             "hive.server2.authentication.spnego.principal",
-                             "hive.server2.authentication.spnego.keytab"]
-
-        props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                            "hive.server2.authentication.spnego.keytab"]
-        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                             props_read_check)
-
-        hive_expectations ={}
-        hive_expectations.update(hive_site_props)
-
-        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
-                                                     {'hive-site.xml': FILE_TYPE_XML})
-        result_issues = validate_security_config_properties(security_params, hive_expectations)
-        if not result_issues: # If all validations passed successfully
-          try:
-            # Double check the dict before calling execute
-            if 'hive-site' not in security_params \
-              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-              self.put_structured_out({"securityState": "UNSECURED"})
-              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-              return
-
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
     def restart_llap(self, env):
       """
       Custom command to Restart LLAP
@@ -426,9 +365,6 @@
       hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
       Execute(hive_interactive_kinit_cmd, user=params.hive_user)
 
-      llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
-      Execute(llap_kinit_cmd, user=params.hive_user)
-
     """
     Get llap app status data for LLAP Tech Preview code base.
     """
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
index 84bac38..a12d388 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
@@ -534,12 +534,20 @@
 ########################################################
 ############# AMS related params #####################
 ########################################################
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+set_instanceId = "false"
+
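+# An external metrics collector configured in cluster-env takes precedence over collectors
+# discovered in the cluster, and turns on instanceId tagging for the sinks.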
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
 has_metric_collector = not len(ams_collector_hosts) == 0
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -557,6 +565,9 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
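+# Host-level in-memory aggregation settings, consumed by the hadoop-metrics2 sink templates.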
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 ########################################################
 ############# Atlas related params #####################
 ########################################################
@@ -676,8 +687,8 @@
   llap_extra_slider_opts = default('/configurations/hive-interactive-env/llap_extra_slider_opts', "")
   hive_llap_principal = None
   if security_enabled:
-    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
-    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
+    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.daemon.keytab.file']
+    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.daemon.service.principal']).replace('_HOST',hostname.lower())
   pass
 
 if len(hive_server_hosts) == 0 and len(hive_server_interactive_hosts) > 0:
@@ -824,3 +835,10 @@
     xa_audit_db_is_enabled = False
 
 # ranger hive plugin section end
+
+# the property below is used for clusters deployed in a cloud env to create the ranger hive service in ranger admin;
+# it needs to be added as a custom property
+ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-properties/ranger.service.config.param.enable.hive.metastore.lookup', False)
+
+if security_enabled:
+  hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
index 80bd7c8..379b4ac 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/setup_ranger_hive.py
@@ -18,6 +18,9 @@
 
 """
 from resource_management.core.logger import Logger
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
 
 def setup_ranger_hive(upgrade_type = None):
   import params
@@ -96,3 +99,58 @@
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger Hive plugin is not enabled')
+
+def setup_ranger_hive_metastore_service():
+  """
+  Creates the Ranger Hive service in the Ranger admin installed in the same cluster, for clusters deployed in a cloud environment.
+  """
+  import params
+
+  if params.has_ranger_admin and params.ranger_hive_metastore_lookup:
+
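+    # Prefer the service name from ranger-hive-security when it holds a concrete value;
+    # an empty value or the unresolved "{{repo_name}}" template falls back to "<clusterName>_hive".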
+    repo_name = str(params.config['clusterName']) + '_hive'
+    repo_name_value = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
+    if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+      repo_name = repo_name_value
+
+    hive_ranger_plugin_config = {
+      'username': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password': params.config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
+      'jdbc.driverClassName': params.config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName'],
+      'jdbc.url': 'none',
+      'commonNameForCertificate': params.config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate'],
+      'ambari.service.check.user': params.config['configurations']['ranger-hive-plugin-properties']['policy_user']
+    }
+
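+    # On a Kerberized cluster, restrict policy/tag download and grant/revoke operations to the hive user.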
+    if params.security_enabled:
+      hive_ranger_plugin_config['policy.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['tag.download.auth.users'] = params.hive_user
+      hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = params.hive_user
+
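+    # Merge in any custom Ranger service config parameters supplied via ranger-hive-plugin-properties.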
+    custom_ranger_service_config = generate_ranger_service_config(params.config['configurations']['ranger-hive-plugin-properties'])
+    if len(custom_ranger_service_config) > 0:
+      hive_ranger_plugin_config.update(custom_ranger_service_config)
+
+    hive_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hive_ranger_plugin_config,
+      'description': 'Hive service',
+      'name': repo_name,
+      'type': 'hive'
+    }
+
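+    # Register the repository with Ranger admin; when retries are disabled, a downed Ranger admin is skipped instead of failing.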
+    ranger_admin_obj = RangeradminV2(url = params.config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url'], skip_if_rangeradmin_down = not params.retryAble)
+    ranger_admin_obj.create_ranger_repository(
+      component = 'hive',
+      repo_name = repo_name,
+      repo_properties = hive_ranger_plugin_repo,
+      ambari_ranger_admin = params.config['configurations']['ranger-env']['ranger_admin_username'],
+      ambari_ranger_password = params.config['configurations']['ranger-env']['ranger_admin_password'],
+      admin_uname = params.config['configurations']['ranger-env']['admin_username'],
+      admin_password = params.config['configurations']['ranger-env']['admin_password'],
+      policy_user = params.config['configurations']['ranger-hive-plugin-properties']['policy_user'],
+      is_security_enabled = params.security_enabled,
+      is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+      component_user = params.hive_user,
+      component_user_principal = params.hive_metastore_principal_with_host if params.security_enabled else None,
+      component_user_keytab = params.hive_metastore_keytab_path if params.security_enabled else None)
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
index 93fa411..18e11ab 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/webhcat_server.py
@@ -84,73 +84,6 @@
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hive-webhcat", params.version)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations ={}
-      expectations.update(
-        build_expectations(
-          'webhcat-site',
-          {
-            "templeton.kerberos.secret": "secret"
-          },
-          [
-            "templeton.kerberos.keytab",
-            "templeton.kerberos.principal"
-          ],
-          [
-            "templeton.kerberos.keytab"
-          ]
-        )
-      )
-      expectations.update(
-        build_expectations(
-          'hive-site',
-          {
-            "hive.server2.authentication": "KERBEROS",
-            "hive.metastore.sasl.enabled": "true",
-            "hive.security.authorization.enabled": "true"
-          },
-          None,
-          None
-        )
-      )
-
-      security_params = {}
-      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
-                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'webhcat-site' not in security_params \
-            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
-            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.webhcat_user,
-                                security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                security_params['webhcat-site']['templeton.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.hcat_log_dir
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 82f71c5..3093e56 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -51,6 +51,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index f1bfa03..59a7c1b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -51,5 +51,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
index d63dfb7..69f6071 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -50,5 +50,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index 4c9c981..c08a498 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -50,5 +50,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
index ef3e340..6d3e13d 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
@@ -568,9 +568,16 @@
     hive_hooks = [x for x in hive_hooks if x != ""]
     is_atlas_present_in_cluster = "ATLAS" in servicesList
 
+    enable_external_atlas_for_hive = False
     enable_atlas_hook = False
+
+    if 'hive-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.hive' in services['configurations']['hive-atlas-application.properties']['properties']:
+      enable_external_atlas_for_hive = services['configurations']['hive-atlas-application.properties']['properties']['enable.external.atlas.for.hive'].lower() == "true"
+
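+    # Turn the Atlas hook on when Atlas is deployed in this cluster, or when an external Atlas has been explicitly enabled above.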
     if is_atlas_present_in_cluster:
       putHiveEnvProperty("hive.atlas.hook", "true")
+    elif enable_external_atlas_for_hive:
+      putHiveEnvProperty("hive.atlas.hook", "true")
     else:
       putHiveEnvProperty("hive.atlas.hook", "false")
 
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/alerts.json
new file mode 100644
index 0000000..04fb583
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KAFKA": {
+    "service": [],
+    "KAFKA_BROKER": [
+      {
+        "name": "kafka_broker_process",
+        "label": "Kafka Broker Process",
+        "description": "This host-level alert is triggered if the Kafka Broker cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{kafka-broker/listeners}}",
+          "default_port": 6667,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
new file mode 100644
index 0000000..1cddb89
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-broker.xml
@@ -0,0 +1,559 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <property>
+    <name>log.dirs</name>
+    <value>/kafka-logs</value>
+    <description>
+      A comma-separated list of one or more directories in which Kafka data is stored.
+      Each new partition that is created will be placed in the directory which currently has the fewest partitions.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>port</name>
+    <value>6667</value>
+    <description>
+      The port on which the server accepts client connections.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.connect</name>
+    <value>localhost:2181</value>
+    <description>
+      Zookeeper also allows you to add a "chroot" path which will make all kafka data for this cluster appear under a particular path.
+      This is a way to set up multiple Kafka clusters or other applications on the same zookeeper cluster. To do this, give a connection
+      string in the form hostname1:port1,hostname2:port2,hostname3:port3/chroot/path, which puts all of this cluster's data under the
+      path /chroot/path. Note that consumers must use the same connection string.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>message.max.bytes</name>
+    <value>1000000</value>
+    <description>
+      The maximum size of a message that the server can receive.
+      It is important that this property be in sync with the maximum fetch size your consumers use or
+      else an unruly producer will be able to publish messages too large for consumers to consume.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>num.network.threads</name>
+    <value>3</value>
+    <description>
+      The number of network threads that the server uses for handling network requests.
+      You probably don't need to change this.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>num.io.threads</name>
+    <value>8</value>
+    <description>
+      The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>queued.max.requests</name>
+    <value>500</value>
+    <description>The number of requests that can be queued up for processing by the I/O threads before the network threads stop reading in new requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>socket.send.buffer.bytes</name>
+    <value>102400</value>
+    <description>
+      The SO_SNDBUF buffer the server prefers for socket connections.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>socket.receive.buffer.bytes</name>
+    <value>102400</value>
+    <description>
+      The SO_RCVBUF buffer the server prefers for socket connections.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>socket.request.max.bytes</name>
+    <value>104857600</value>
+    <description>
+      The maximum request size the server will allow. This prevents the server from running out of memory and should be smaller than the Java heap size.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>num.partitions</name>
+    <value>1</value>
+    <description>
+        The default number of partitions per topic.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.segment.bytes</name>
+    <value>1073741824</value>
+    <description>
+      The maximum size of a single log segment file.
+      When this size is reached, a new log segment will be rolled.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.roll.hours</name>
+    <value>168</value>
+    <description>
+      This setting will force Kafka to roll a new log segment even if the log.segment.bytes size has not been reached.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.retention.bytes</name>
+    <value>-1</value>
+    <description>
+      The amount of data to retain in the log for each topic partition. Note that this is the limit per partition, so multiply by the number of partitions to get the total data retained for the topic. Also note that if log.retention.hours and log.retention.bytes are both set, we delete a segment when either limit is exceeded.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
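+  <!-- Illustrative example: with log.retention.bytes=1073741824 and 10 partitions, a topic may retain roughly 10 GB in total before segments are deleted. -->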
+  <property>
+    <name>log.retention.hours</name>
+    <value>168</value>
+    <description>
+      The number of hours to keep a log segment before it is deleted, i.e. the default data retention window for all topics. Note that if log.retention.hours and log.retention.bytes are both set, we delete a segment when either limit is exceeded.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.cleanup.interval.mins</name>
+    <value>10</value>
+    <description>The frequency in minutes that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>
+      The frequency in milliseconds that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.index.size.max.bytes</name>
+    <value>10485760</value>
+    <description>
+      The maximum size in bytes we allow for the offset index for each log segment. Note that we will always pre-allocate a
+      sparse file with this much space and shrink it down when the log rolls. If the index fills up we will roll a new log segment
+      even if we haven't reached the log.segment.bytes limit.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.index.interval.bytes</name>
+    <value>4096</value>
+    <description>
+      The byte interval at which we add an entry to the offset index. When executing a fetch request the server must do a linear scan for up to this many bytes to find the correct position in the log to begin and end the fetch. So setting this value to be larger will mean larger index files (and a bit more memory usage) but less scanning. However the server will never add more than one index entry per log append (even if more than log.index.interval worth of messages are appended). In general you probably don't need to mess with this value.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>auto.create.topics.enable</name>
+    <value>true</value>
+    <description>
+      Enable auto creation of topic on the server. If this is set to true then attempts to produce, consume, or fetch metadata for a non-existent topic will automatically create it with the default replication factor and number of partitions.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controller.socket.timeout.ms</name>
+    <value>30000</value>
+    <description>The socket timeout for commands from the partition management controller to the replicas.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controller.message.queue.size</name>
+    <value>10</value>
+    <description>The buffer size for controller-to-broker channels.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>default.replication.factor</name>
+    <value>1</value>
+    <description>The default replication factor for automatically created topics.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.lag.time.max.ms</name>
+    <value>10000</value>
+    <description>If a follower hasn't sent any fetch requests for this window of time, the leader will remove the follower from ISR (in-sync replicas) and treat it as dead.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.lag.max.messages</name>
+    <value>4000</value>
+    <description>
+      If a replica falls more than this many messages behind the leader, the leader will remove the follower from ISR and treat it as dead.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.socket.timeout.ms</name>
+    <value>30000</value>
+    <description>The socket timeout for network requests to the leader for replicating data.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.socket.receive.buffer.bytes</name>
+    <value>65536</value>
+    <description>The socket receive buffer for network requests to the leader for replicating data.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.fetch.max.bytes</name>
+    <value>1048576</value>
+    <description>The number of bytes of messages to attempt to fetch for each partition in the fetch requests the replicas send to the leader.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.fetch.wait.max.ms</name>
+    <value>500</value>
+    <description>The maximum amount of time to wait for data to arrive on the leader in the fetch requests sent by the replicas to the leader.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.fetch.min.bytes</name>
+    <value>1</value>
+    <description>Minimum bytes expected for each fetch response for the fetch requests from the replica to the leader. If not enough bytes are available, wait up to replica.fetch.wait.max.ms for this many bytes to arrive.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>num.replica.fetchers</name>
+    <value>1</value>
+    <description>
+      Number of threads used to replicate messages from leaders. Increasing this value can increase the degree of I/O parallelism in the follower broker.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>replica.high.watermark.checkpoint.interval.ms</name>
+    <value>5000</value>
+    <description>The frequency with which each replica saves its high watermark to disk to handle recovery.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>fetch.purgatory.purge.interval.requests</name>
+    <value>10000</value>
+    <description>The purge interval (in number of requests) of the fetch request purgatory.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>producer.purgatory.purge.interval.requests</name>
+    <value>10000</value>
+    <description>The purge interval (in number of requests) of the producer request purgatory.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout.ms</name>
+    <value>30000</value>
+    <description>Zookeeper session timeout. If the server fails to heartbeat to zookeeper within this period of time it is considered dead. If you set this too low the server may be falsely considered dead; if you set it too high it may take too long to recognize a truly dead server.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.connection.timeout.ms</name>
+    <value>25000</value>
+    <description>The maximum amount of time that the client waits to establish a connection to zookeeper.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.sync.time.ms</name>
+    <value>2000</value>
+    <description>How far a ZK follower can be behind a ZK leader.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controlled.shutdown.max.retries</name>
+    <value>3</value>
+    <description>Number of retries to complete the controlled shutdown successfully before executing an unclean shutdown.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controlled.shutdown.retry.backoff.ms</name>
+    <value>5000</value>
+    <description>
+      Backoff time between shutdown retries.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.metrics.reporters</name>
+    <value/>
+    <description>
+      Comma-separated list of metrics reporter classes, e.g. the Kafka Ganglia metrics reporter and the Kafka Timeline metrics reporter.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.ganglia.metrics.reporter.enabled</name>
+    <value>true</value>
+    <description>
+      Enable the Kafka Ganglia metrics reporter.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.ganglia.metrics.host</name>
+    <value>localhost</value>
+    <description>Ganglia host</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.ganglia.metrics.port</name>
+    <value>8671</value>
+    <description>Ganglia port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.ganglia.metrics.group</name>
+    <value>kafka</value>
+    <description>Ganglia group name</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.reporter.enabled</name>
+    <value>true</value>
+    <description>Enable the Kafka Timeline metrics reporter</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.hosts</name>
+    <value>{{ams_collector_hosts}}</value>
+    <description>Timeline metrics collector hosts</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.port</name>
+    <value>{{metric_collector_port}}</value>
+    <description>Timeline port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.protocol</name>
+    <value>{{metric_collector_protocol}}</value>
+    <description>Timeline protocol (http or https)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.path</name>
+    <value>{{metric_truststore_path}}</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.type</name>
+    <value>{{metric_truststore_type}}</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.password</name>
+    <value>{{metric_truststore_password}}</value>
+    <description>Password to open the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.reporter.sendInterval</name>
+    <value>5900</value>
+    <description>Timeline metrics reporter send interval</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.maxRowCacheSize</name>
+    <value>10000</value>
+    <description>Timeline metrics reporter max row cache size</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>listeners</name>
+    <value>PLAINTEXT://localhost:6667</value>
+    <description>Host and port on which the Kafka broker accepts connections. localhost will be substituted with the actual hostname.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controlled.shutdown.enable</name>
+    <value>true</value>
+    <description>Enable controlled shutdown of the broker. If enabled, the broker will move all leaders on it to some other brokers before shutting itself down. This reduces the unavailability window during shutdown.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>auto.leader.rebalance.enable</name>
+    <value>true</value>
+    <description>Enables auto leader balancing. A background thread checks and triggers leader balancing at regular intervals if required.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>num.recovery.threads.per.data.dir</name>
+    <value>1</value>
+    <description>The number of threads per data directory to be used for log recovery at startup and flushing at shutdown</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>min.insync.replicas</name>
+    <value>1</value>
+    <description>Defines the minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>leader.imbalance.per.broker.percentage</name>
+    <value>10</value>
+    <description>The ratio of leader imbalance allowed per broker. The controller triggers a leader rebalance if the imbalance goes above this value for a broker. The value is specified as a percentage.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>leader.imbalance.check.interval.seconds</name>
+    <value>300</value>
+    <description>The frequency with which the partition rebalance check is triggered by the controller</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offset.metadata.max.bytes</name>
+    <value>4096</value>
+    <description>The maximum size for a metadata entry associated with an offset commit</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.load.buffer.size</name>
+    <value>5242880</value>
+    <description>Batch size for reading from the offsets segments when loading offsets into the cache.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.topic.replication.factor</name>
+    <value>3</value>
+    <description>The replication factor for the offsets topic (set higher to ensure availability).
+    To ensure that the effective replication factor of the offsets topic is the configured value,
+    the number of alive brokers has to be at least the replication factor at the time of the
+    first request for the offsets topic. If not, either the offsets topic creation will fail or it will get a replication factor of min(alive brokers, configured replication factor).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
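+  <!-- Illustrative example: if only 2 brokers are alive when the offsets topic is first created, it gets a replication factor of min(2, 3) = 2. -->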
+  <property>
+    <name>offsets.topic.num.partitions</name>
+    <value>50</value>
+    <description>The number of partitions for the offset commit topic (should not change after deployment)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.topic.segment.bytes</name>
+    <value>104857600</value>
+    <description>The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.topic.compression.codec</name>
+    <value>0</value>
+    <description>Compression codec for the offsets topic; compression may be used to achieve \"atomic\" commits. Default is 0 (NoCompression). Use 1 for Gzip, 2 for Snappy, 3 for LZ4.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.retention.minutes</name>
+    <value>86400000</value>
+    <description>Log retention window in minutes for offsets topic</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>Frequency at which to check for stale offsets</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.commit.timeout.ms</name>
+    <value>5000</value>
+    <description>Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>offsets.commit.required.acks</name>
+    <value>-1</value>
+    <description>The required acks before the commit can be accepted. In general, the default (-1) should not be overridden</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>delete.topic.enable</name>
+    <value>true</value>
+    <description>Enables topic deletion. Deleting a topic through the admin tool will have no effect if this config is turned off.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>compression.type</name>
+    <description>Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4'). It additionally accepts 'uncompressed', which is equivalent to no compression, and 'producer', which means retain the original compression codec set by the producer.</description>
+    <value>producer</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external.kafka.metrics.exclude.prefix</name>
+    <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec</value>
+    <description>
+      Exclude metrics starting with these prefixes from being collected.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>external.kafka.metrics.include.prefix</name>
+    <value>kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request</value>
+    <description>
+      Metrics matching these prefixes are collected even if they also match one of the exclude prefixes.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>authorizer.class.name</name>
+    <description>
+      Kafka authorizer class
+    </description>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-env.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-env.xml
new file mode 100644
index 0000000..90ba1c8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-env.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>kafka_user</name>
+    <display-name>Kafka User</display-name>
+    <value>kafka</value>
+    <property-type>USER</property-type>
+    <description/>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_keytab</name>
+    <description>Kafka keytab path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_principal_name</name>
+    <description>Kafka principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_log_dir</name>
+    <value>/var/log/kafka</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_pid_dir</name>
+    <value>/var/run/kafka</value>
+    <display-name>Kafka PID dir</display-name>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_user_nofile_limit</name>
+    <value>128000</value>
+    <description>Max open files limit setting for KAFKA user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for KAFKA user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- kafka-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>kafka-env template</display-name>
+    <description>This is the Jinja template for the kafka-env.sh file</description>
+    <value>
+#!/bin/bash
+
+# Set KAFKA specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java64_home}}
+export PATH=$PATH:$JAVA_HOME/bin
+export PID_DIR={{kafka_pid_dir}}
+export LOG_DIR={{kafka_log_dir}}
+export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}
+# Add kafka sink and related dependencies to the classpath
+if [ -e "/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar" ]; then
+  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar
+  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*
+fi
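+# Source Ranger-specific environment settings, if present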
+if [ -f /etc/kafka/conf/kafka-ranger-env.sh ]; then
+  . /etc/kafka/conf/kafka-ranger-env.sh
+fi
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>is_supported_kafka_ranger</name>
+    <value>true</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-log4j.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-log4j.xml
new file mode 100644
index 0000000..6ae1a6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka-log4j.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+  <property>
+    <name>kafka_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Kafka Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kafka_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Kafka Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controller_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Kafka Controller Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>controller_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Kafka Controller Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>kafka-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+kafka.logs.dir=logs
+
+log4j.rootLogger=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+log4j.appender.kafkaAppender.MaxFileSize={{kafka_log_maxfilesize}}MB
+log4j.appender.kafkaAppender.MaxBackupIndex={{kafka_log_maxbackupindex}}
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+log4j.appender.controllerAppender.MaxFileSize={{controller_log_maxfilesize}}MB
+log4j.appender.controllerAppender.MaxBackupIndex={{controller_log_maxbackupindex}}
+# Turn on all our debugging info
+#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
+#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
+#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
+log4j.logger.kafka=INFO, kafkaAppender
+log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+   </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_client_jaas_conf.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_client_jaas_conf.xml
new file mode 100644
index 0000000..3f63b03
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_client_jaas_conf.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>kafka_client_jaas template</display-name>
+    <description>Kafka client jaas config</description>
+    <value>
+KafkaClient {
+com.sun.security.auth.module.Krb5LoginModule required
+useTicketCache=true
+renewTicket=true
+serviceName="{{kafka_bare_jaas_principal}}";
+};
+   </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
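+  <!-- Note: clients typically load this file via the standard JVM flag
+       -Djava.security.auth.login.config=<path to kafka_client_jaas.conf>;
+       the exact path depends on how the client is launched. -->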
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_jaas_conf.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_jaas_conf.xml
new file mode 100644
index 0000000..a43cf28
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/kafka_jaas_conf.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>kafka_jaas template</display-name>
+    <description>Kafka jaas config</description>
+    <value>
+KafkaServer {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+keyTab="{{kafka_keytab_path}}"
+storeKey=true
+useTicketCache=false
+serviceName="{{kafka_bare_jaas_principal}}"
+principal="{{kafka_jaas_principal}}";
+};
+KafkaClient {
+com.sun.security.auth.module.Krb5LoginModule required
+useTicketCache=true
+renewTicket=true
+serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+keyTab="{{kafka_keytab_path}}"
+storeKey=true
+useTicketCache=false
+serviceName="zookeeper"
+principal="{{kafka_jaas_principal}}";
+};
+   </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-audit.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-audit.xml
new file mode 100644
index 0000000..59e7295
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-audit.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/kafka/audit/hdfs/spool</value>
+    <description>Local spool directory for audit logs destined for HDFS</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/kafka/audit/solr/spool</value>
+    <description>Local spool directory for audit logs destined for Solr</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>true</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster where the Ranger Kafka plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-plugin-properties.xml
new file mode 100644
index 0000000..29aa31c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-plugin-properties.xml
@@ -0,0 +1,148 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for KAFKA</display-name>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zookeeper.connect</name>
+    <value>localhost:2181</value>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-kafka-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for KAFKA</display-name>
+    <description>Enable the Ranger Kafka plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>kafka</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger repository config password</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value/>
+    <display-name>External Ranger admin username</display-name>
+    <description>Set the default Ranger admin username here if you want to communicate with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value/>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Set the default Ranger admin password here if you want to communicate with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value/>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Set the default Ranger Ambari admin username here if you want to communicate with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value/>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Set the default Ranger Ambari admin password here if you want to communicate with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>kafka</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <depends-on>
+      <property>
+        <type>ranger-kafka-plugin-properties</type>
+        <name>ranger-kafka-plugin-enabled</name>
+      </property>
+      <property>
+        <type>kafka-env</type>
+        <name>kafka_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-policymgr-ssl.xml
new file mode 100644
index 0000000..4a76b60
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file/{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file/{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-security.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-security.xml
new file mode 100644
index 0000000..47ea2a8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/configuration/ranger-kafka-security.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.kafka.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Kafka instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.policy.rest.ssl.config.file</name>
+    <value>/etc/kafka/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>Interval in milliseconds at which to poll Ranger Admin for policy changes</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kafka.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
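
Taken together, ranger.plugin.kafka.policy.rest.url, policy.pollIntervalMs and policy.cache.dir describe a poll-and-cache loop: the plugin fetches policies from Ranger Admin every 30 seconds and keeps the last good copy on disk so authorization keeps working while Admin is unreachable. A minimal sketch of that loop (not the Ranger plugin's real code; fetch_policies and the /tmp cache dir are stand-ins):

import json
import os

POLL_INTERVAL_MS = 30000        # ranger.plugin.kafka.policy.pollIntervalMs
CACHE_DIR = "/tmp/policycache"  # stands in for /etc/ranger/{{repo_name}}/policycache

def fetch_policies():
    # Placeholder for the REST call against ranger.plugin.kafka.policy.rest.url
    return {"serviceName": "c1_kafka", "policies": []}

def poll_once():
    try:
        policies = fetch_policies()
        if not os.path.isdir(CACHE_DIR):
            os.makedirs(CACHE_DIR)
        with open(os.path.join(CACHE_DIR, "kafka_policies.json"), "w") as f:
            json.dump(policies, f)
    except Exception:
        pass  # Admin unreachable: keep enforcing from the cached copy

poll_once()  # the real plugin repeats this every POLL_INTERVAL_MS
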
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/kerberos.json
new file mode 100644
index 0000000..eb31ad6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/kerberos.json
@@ -0,0 +1,76 @@
+{
+  "services": [
+    {
+      "name": "KAFKA",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "kafka-broker": {
+              "authorizer.class.name": "kafka.security.auth.SimpleAclAuthorizer",
+              "principal.to.local.class":"kafka.security.auth.KerberosPrincipalToLocal",
+              "super.users": "user:${kafka-env/kafka_user}",
+              "security.inter.broker.protocol": "PLAINTEXTSASL",
+              "zookeeper.set.acl": "true",
+              "listeners": "${kafka-broker/listeners|replace(\\bPLAINTEXT\\b, PLAINTEXTSASL)}"
+          }
+        },
+        {
+          "ranger-kafka-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "KAFKA_BROKER",
+          "identities": [
+            {
+              "name": "kafka_broker",
+              "principal": {
+                "value": "${kafka-env/kafka_user}/_HOST@${realm}",
+                "type": "service",
+                "configuration": "kafka-env/kafka_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/kafka.service.keytab",
+                "owner": {
+                  "name": "${kafka-env/kafka_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "kafka-env/kafka_keytab"
+              }
+            },
+            {
+              "name": "/KAFKA/KAFKA_BROKER/kafka_broker",
+              "principal": {
+                "configuration": "ranger-kafka-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-kafka-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs",
+              "when" : {
+                "contains" : ["services", "HDFS"]
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
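
Principal templates such as ${kafka-env/kafka_user}/_HOST@${realm} are resolved by substituting each ${config-type/property} reference and replacing the _HOST marker with the lowercased hostname, the same replace('_HOST', ...) step that params.py below performs. A minimal sketch with assumed config values:

import re

configs = {"kafka-env/kafka_user": "kafka", "realm": "EXAMPLE.COM"}  # assumed

def resolve(template, configs, hostname):
    # Substitute ${...} references, then the _HOST marker
    out = re.sub(r"\$\{([^}]+)\}", lambda m: configs[m.group(1)], template)
    return out.replace("_HOST", hostname.lower())

print(resolve("${kafka-env/kafka_user}/_HOST@${realm}", configs, "broker1.example.com"))
# -> kafka/broker1.example.com@EXAMPLE.COM
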
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
new file mode 100644
index 0000000..a19850e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KAFKA</name>
+            <displayName>Kafka</displayName>
+            <comment>A high-throughput distributed messaging system</comment>
+            <version>0.10.0.3.0</version>
+            <components>
+                <component>
+                    <name>KAFKA_BROKER</name>
+                    <displayName>Kafka Broker</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1+</cardinality>
+                    <versionAdvertised>true</versionAdvertised>
+                    <dependencies>
+                        <dependency>
+                            <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+                            <scope>cluster</scope>
+                            <auto-deploy>
+                                <enabled>true</enabled>
+                            </auto-deploy>
+                        </dependency>
+                    </dependencies>
+                    <commandScript>
+                        <script>scripts/kafka_broker.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>1200</timeout>
+                    </commandScript>
+                    <logs>
+                        <log>
+                            <logId>kafka_server</logId>
+                            <primary>true</primary>
+                        </log>
+                        <log>
+                            <logId>kafka_controller</logId>
+                        </log>
+                        <log>
+                            <logId>kafka_request</logId>
+                        </log>
+                        <log>
+                            <logId>kafka_logcleaner</logId>
+                        </log>
+                        <log>
+                            <logId>kafka_statechange</logId>
+                        </log>
+                    </logs>
+                </component>
+            </components>
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>
+            </commandScript>
+            <requiredServices>
+                <service>ZOOKEEPER</service>
+            </requiredServices>
+            <configuration-dependencies>
+                <config-type>kafka-broker</config-type>
+                <config-type>kafka-env</config-type>
+                <config-type>kafka-log4j</config-type>
+                <config-type>ranger-kafka-plugin-properties</config-type>
+                <config-type>ranger-kafka-audit</config-type>
+                <config-type>ranger-kafka-policymgr-ssl</config-type>
+                <config-type>ranger-kafka-security</config-type>
+                <config-type>zookeeper-env</config-type>
+                <config-type>zoo.cfg</config-type>
+                <config-type>kafka_jaas_conf</config-type>
+                <config-type>kafka_client_jaas_conf</config-type>
+            </configuration-dependencies>
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+                    <packages>
+                        <package>
+                            <name>kafka_${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+                    <packages>
+                        <package>
+                            <name>kafka-${stack_version}</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+            <restartRequiredAfterChange>true</restartRequiredAfterChange>
+        </service>
+    </services>
+</metainfo>
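
Since metainfo.xml is plain XML, the service, component and config-type declarations above can be inspected with the standard library alone. A minimal sketch (the file path is assumed):

import xml.etree.ElementTree as ET

tree = ET.parse("metainfo.xml")  # path assumed
for svc in tree.getroot().iter("service"):
    print("{0} {1}".format(svc.findtext("name"), svc.findtext("version")))
    for comp in svc.iter("component"):
        print("  component: {0} ({1})".format(comp.findtext("name"), comp.findtext("cardinality")))
    for ct in svc.iter("config-type"):
        print("  config-type: {0}".format(ct.text))
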
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metrics.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metrics.json
new file mode 100644
index 0000000..e99f4eb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metrics.json
@@ -0,0 +1,239 @@
+{
+  "KAFKA_BROKER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/uptime": {
+              "metric": "jvm.uptime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/heap_usage": {
+              "metric": "jvm.heap_usage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/non_heap_usage": {
+              "metric": "jvm.non_heap_usage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/runnable": {
+              "metric": "jvm.thread-states.runnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/blocked": {
+              "metric": "jvm.thread-states.blocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/timed_waiting": {
+              "metric": "jvm.thread-states.timed_waiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/terminated": {
+              "metric": "jvm.thread-states.terminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread_count": {
+              "metric": "jvm.thread_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/daemon_thread_count": {
+              "metric": "jvm.daemon_thread_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.5MinuteRate",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.15MinuteRate",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/KafkaController/ActiveControllerCount": {
+              "metric": "kafka.controller.KafkaController.ActiveControllerCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/meanRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/1MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/5MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/15MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/count": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/1MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/5MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/15MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/OfflinePartitionsCount": {
+              "metric": "kafka.controller.ControllerStats.OfflinePartitionsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/PartitionCount": {
+              "metric": "kafka.server.ReplicaManager.PartitionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/LeaderCount": {
+              "metric": "kafka.server.ReplicaManager.LeaderCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/UnderReplicatedPartitions": {
+              "metric": "kafka.server.ReplicaManager.UnderReplicatedPartitions",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/ISRShrinksPerSec": {
+              "metric": "kafka.server.ReplicaManager.ISRShrinksPerSec",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/ISRExpandsPerSec": {
+              "metric": "kafka.server.ReplicaManager.ISRExpandsPerSec",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/kafka/server/ReplicaFetcherManager/Replica-MaxLag": {
+              "metric": "kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ProducerRequestPurgatory/PurgatorySize": {
+              "metric": "kafka.server.ProducerRequestPurgatory.PurgatorySize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/FetchRequestPurgatory/PurgatorySize": {
+              "metric": "kafka.server.FetchRequestPurgatory.PurgatorySize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/cluster/Partition/$1-UnderReplicated": {
+              "metric": "kafka.cluster.Partition.(\\w+)-UnderReplicated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/consumer/ConsumerFetcherManager/$1-MaxLag": {
+              "metric": "kafka.consumer.ConsumerFetcherManager.(\\w+)-MaxLag",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/consumer/ConsumerFetcherManager/$1-MinFetch": {
+              "metric": "kafka.consumer.ConsumerFetcherManager.(\\w+)-MinFetch",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  }
+}
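
The $1 placeholder in keys like metrics/kafka/cluster/Partition/$1-UnderReplicated pairs with the capture group in the corresponding "metric" pattern, so a single entry covers every per-topic instance of the metric. A minimal sketch of that mapping with a made-up raw metric name:

import re

pattern = r"kafka\.cluster\.Partition\.(\w+)-UnderReplicated"
exposed = "metrics/kafka/cluster/Partition/$1-UnderReplicated"

raw = "kafka.cluster.Partition.mytopic-UnderReplicated"  # made-up raw metric
m = re.match(pattern, raw)
if m:
    print(exposed.replace("$1", m.group(1)))
# -> metrics/kafka/cluster/Partition/mytopic-UnderReplicated
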
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
new file mode 100644
index 0000000..680dd32
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import collections
+import os
+
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.core.resources.system import Directory, Execute, File, Link
+from resource_management.core.source import StaticFile, Template, InlineTemplate
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import Direction
+
+
+from resource_management.core.logger import Logger
+
+
+def kafka(upgrade_type=None):
+    import params
+    ensure_base_directories()
+
+    kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
+    # In HDP-2.2, broker.id assignment still has the issue that it depends on the alphabetical order of hostnames.
+    # Starting in HDP 2.3, Kafka handles the generation of broker.id itself, so Ambari doesn't have to.
+
+    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
+    Logger.info(format("Effective stack version: {effective_version}"))
+
+    # In HDP-2.2 (Apache Kafka 0.8.1.1) we used to generate broker.ids based on hosts and add them to
+    # kafka's server.properties. In future versions, brokers can generate their own ids based on a zookeeper sequence.
+    # We need to preserve the broker.id when the user is upgrading from HDP-2.2 to any higher version.
+    # Once it's preserved, it will be written to kafka.log.dirs/meta.properties and used from there on.
+    # Similarly, we need to preserve the port during the upgrade.
+
+    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and \
+      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, params.current_version) and \
+      check_stack_feature(StackFeature.KAFKA_LISTENERS, params.version):
+      if len(params.kafka_hosts) > 0 and params.hostname in params.kafka_hosts:
+        brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
+        kafka_server_config['broker.id'] = brokerid
+        Logger.info(format("Calculating broker.id as {brokerid}"))
+      if 'port' in kafka_server_config:
+        port = kafka_server_config['port']
+        Logger.info(format("Port config from previous verson: {port}"))
+        listeners = kafka_server_config['listeners']
+        kafka_server_config['listeners'] = listeners.replace("6667", port)
+        Logger.info(format("Kafka listeners after the port update: {listeners}"))
+        del kafka_server_config['port']
+
+
+    if effective_version is not None and effective_version != "" and \
+      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, effective_version):
+      if len(params.kafka_hosts) > 0 and params.hostname in params.kafka_hosts:
+        brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
+        kafka_server_config['broker.id'] = brokerid
+        Logger.info(format("Calculating broker.id as {brokerid}"))
+
+    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
+    if effective_version is not None and effective_version != "" and \
+       check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):
+
+       listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
+       Logger.info(format("Kafka listeners: {listeners}"))
+       kafka_server_config['listeners'] = listeners
+
+       if params.security_enabled and params.kafka_kerberos_enabled:
+         Logger.info("Kafka kerberos security is enabled.")
+         kafka_server_config['advertised.listeners'] = listeners
+         Logger.info(format("Kafka advertised listeners: {listeners}"))
+       elif 'advertised.listeners' in kafka_server_config:
+         advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
+         kafka_server_config['advertised.listeners'] = advertised_listeners
+         Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
+    else:
+      kafka_server_config['host.name'] = params.hostname
+
+    if params.has_metric_collector:
+      kafka_server_config['kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
+      kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
+      kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
+      kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
+      kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
+      kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password
+
+    kafka_data_dir = kafka_server_config['log.dirs']
+    kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+    Directory(kafka_data_dirs,
+              mode=0755,
+              cd_access='a',
+              owner=params.kafka_user,
+              group=params.user_group,
+              create_parents = True,
+              recursive_ownership = True,
+    )
+
+    PropertiesFile("server.properties",
+                      dir=params.conf_dir,
+                      properties=kafka_server_config,
+                      owner=params.kafka_user,
+                      group=params.user_group,
+    )
+
+    File(format("{conf_dir}/kafka-env.sh"),
+          owner=params.kafka_user,
+          content=InlineTemplate(params.kafka_env_sh_template)
+     )
+
+    if params.log4j_props is not None:
+        File(format("{conf_dir}/log4j.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.kafka_user,
+             content=InlineTemplate(params.log4j_props)
+         )
+
+    if params.security_enabled and params.kafka_kerberos_enabled:
+      if params.kafka_jaas_conf_template:
+        File(format("{conf_dir}/kafka_jaas.conf"),
+             owner=params.kafka_user,
+             content=InlineTemplate(params.kafka_jaas_conf_template)
+        )
+      else:
+        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
+                         owner=params.kafka_user)
+
+      if params.kafka_client_jaas_conf_template:
+        File(format("{conf_dir}/kafka_client_jaas.conf"),
+             owner=params.kafka_user,
+             content=InlineTemplate(params.kafka_client_jaas_conf_template)
+        )
+      else:
+        TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
+                       owner=params.kafka_user)
+
+    # On some OSes this folder may not exist, so create it before placing files there
+    Directory(params.limits_conf_dir,
+              create_parents = True,
+              owner='root',
+              group='root'
+    )
+
+    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
+         owner='root',
+         group='root',
+         mode=0644,
+         content=Template("kafka.conf.j2")
+    )
+
+    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
+         owner='root',
+         group='root',
+         mode=0644,
+         content=Template("tools-log4j.properties.j2")
+         )
+
+    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
+    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
+
+
+def mutable_config_dict(kafka_broker_config):
+    kafka_server_config = {}
+    for key, value in kafka_broker_config.iteritems():
+        kafka_server_config[key] = value
+    return kafka_server_config
+
+
+# Used to work around the hardcoded pid/log dirs used by the kafka bash process launcher
+def setup_symlink(kafka_managed_dir, kafka_ambari_managed_dir):
+  import params
+  backup_folder_path = None
+  backup_folder_suffix = "_tmp"
+  if kafka_ambari_managed_dir != kafka_managed_dir:
+    if os.path.exists(kafka_managed_dir) and not os.path.islink(kafka_managed_dir):
+
+      # Back up existing data before deleting, since the config may be switched to/from the default location repeatedly and the directory may hold relevant contents (historic logs)
+      backup_folder_path = backup_dir_contents(kafka_managed_dir, backup_folder_suffix)
+
+      Directory(kafka_managed_dir,
+                action="delete",
+                create_parents = True)
+
+    elif os.path.islink(kafka_managed_dir) and os.path.realpath(kafka_managed_dir) != kafka_ambari_managed_dir:
+      Link(kafka_managed_dir,
+           action="delete")
+
+    if not os.path.islink(kafka_managed_dir):
+      Link(kafka_managed_dir,
+           to=kafka_ambari_managed_dir)
+
+  elif os.path.islink(kafka_managed_dir): # If the configured dir now coincides with the kafka managed dir, remove the symlink and physically create the folder
+    Link(kafka_managed_dir,
+         action="delete")
+
+    Directory(kafka_managed_dir,
+              mode=0755,
+              cd_access='a',
+              owner=params.kafka_user,
+              group=params.user_group,
+              create_parents = True,
+              recursive_ownership = True,
+    )
+
+  if backup_folder_path:
+    # Restore backed-up files to the relevant dirs if needed; triggered only when changing to/from the default path
+    for file in os.listdir(backup_folder_path):
+      if os.path.isdir(os.path.join(backup_folder_path, file)):
+        Execute(('cp', '-r', os.path.join(backup_folder_path, file), kafka_managed_dir),
+                sudo=True)
+        Execute(("chown", "-R", format("{kafka_user}:{user_group}"), os.path.join(kafka_managed_dir, file)),
+                sudo=True)
+      else:
+        File(os.path.join(kafka_managed_dir,file),
+             owner=params.kafka_user,
+             content = StaticFile(os.path.join(backup_folder_path,file)))
+
+    # Clean up backed up folder
+    Directory(backup_folder_path,
+              action="delete",
+              create_parents = True)
+
+
+# Uses agent temp dir to store backup files
+def backup_dir_contents(dir_path, backup_folder_suffix):
+  import params
+  backup_destination_path = params.tmp_dir + os.path.normpath(dir_path)+backup_folder_suffix
+  Directory(backup_destination_path,
+            mode=0755,
+            cd_access='a',
+            owner=params.kafka_user,
+            group=params.user_group,
+            create_parents = True,
+            recursive_ownership = True,
+  )
+  # Safely copy top-level contents to backup folder
+  for file in os.listdir(dir_path):
+    if os.path.isdir(os.path.join(dir_path, file)):
+      Execute(('cp', '-r', os.path.join(dir_path, file), backup_destination_path),
+              sudo=True)
+      Execute(("chown", "-R", format("{kafka_user}:{user_group}"), os.path.join(backup_destination_path, file)),
+              sudo=True)
+    else:
+      File(os.path.join(backup_destination_path, file),
+         owner=params.kafka_user,
+         content = StaticFile(os.path.join(dir_path,file)))
+
+  return backup_destination_path
+
+def ensure_base_directories():
+  import params
+  Directory([params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir],
+            mode=0755,
+            cd_access='a',
+            owner=params.kafka_user,
+            group=params.user_group,
+            create_parents = True,
+            recursive_ownership = True,
+            )
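
The broker.id preservation above reduces to one expression: this host's index in the sorted broker host list, i.e. sorted(params.kafka_hosts).index(params.hostname). A tiny illustration with hypothetical hosts:

kafka_hosts = ["b.example.com", "a.example.com", "c.example.com"]  # hypothetical
hostname = "b.example.com"

broker_id = str(sorted(kafka_hosts).index(hostname))
print(broker_id)  # -> "1"; stable as long as the broker host set doesn't change
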
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka_broker.py
new file mode 100644
index 0000000..81715f9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka_broker.py
@@ -0,0 +1,151 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import Script
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.show_logs import show_logs
+from kafka import ensure_base_directories
+
+import upgrade
+from kafka import kafka
+from setup_ranger_kafka import setup_ranger_kafka
+
+class KafkaBroker(Script):
+
+  def get_component_name(self):
+    return "kafka-broker"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    kafka(upgrade_type=upgrade_type)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select("kafka-broker", params.version)
+
+    if params.version and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version):
+      conf_select.select(params.stack_name, "kafka", params.version)
+
+    # This is extremely important: the ACL migration below must only run when the upgrade crosses the HDP 2.3.4.0 boundary.
+    if params.current_version and params.version and params.upgrade_direction:
+      src_version = dst_version = None
+      if params.upgrade_direction == Direction.UPGRADE:
+        src_version = format_stack_version(params.current_version)
+        dst_version = format_stack_version(params.version)
+      else:
+        # These represent the original values during the UPGRADE direction
+        src_version = format_stack_version(params.version)
+        dst_version = format_stack_version(params.downgrade_from_version)
+
+      if not check_stack_feature(StackFeature.KAFKA_ACL_MIGRATION_SUPPORT, src_version) and check_stack_feature(StackFeature.KAFKA_ACL_MIGRATION_SUPPORT, dst_version):
+        # Calling the acl migration script requires the configs to be present.
+        self.configure(env, upgrade_type=upgrade_type)
+        upgrade.run_migration(env, upgrade_type)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type=upgrade_type)
+
+    if params.security_enabled:
+      if params.version and check_stack_feature(StackFeature.KAFKA_KERBEROS, params.version):
+        kafka_kinit_cmd = format("{kinit_path_local} -kt {kafka_keytab_path} {kafka_jaas_principal};")
+        Execute(kafka_kinit_cmd, user=params.kafka_user)
+
+    if params.is_supported_kafka_ranger:
+      setup_ranger_kafka()  # Ranger Kafka plugin related call
+    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
+    no_op_test = format('ls {params.kafka_pid_file} >/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}` >/dev/null 2>&1')
+    try:
+      Execute(daemon_cmd,
+              user=params.kafka_user,
+              not_if=no_op_test
+      )
+    except:
+      show_logs(params.kafka_log_dir, params.kafka_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    # Kafka package scripts change permissions on folders, so we have to
+    # restore permissions after installing repo version bits
+    # before attempting to stop Kafka Broker
+    ensure_base_directories()
+    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
+    try:
+      Execute(daemon_cmd,
+              user=params.kafka_user,
+      )
+    except:
+      show_logs(params.kafka_log_dir, params.kafka_user)
+      raise
+    File(params.kafka_pid_file,
+          action = "delete"
+    )
+
+  def disable_security(self, env):
+    import params
+    if not params.zookeeper_connect:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    if not params.secure_acls:
+      Logger.info("The zookeeper.set.acl is false. Skipping reverting ACL")
+      return
+    Execute(
+      "{0} --zookeeper.connect {1} --zookeeper.acl=unsecure".format(params.kafka_security_migrator, params.zookeeper_connect), \
+      user=params.kafka_user, \
+      environment={ 'JAVA_HOME': params.java64_home }, \
+      logoutput=True, \
+      tries=3)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.kafka_pid_file)
+    
+  def get_log_folder(self):
+    import params
+    return params.kafka_log_dir
+  
+  def get_user(self):
+    import params
+    return params.kafka_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.kafka_pid_file]
+
+if __name__ == "__main__":
+  KafkaBroker().execute()
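
The not_if guard in start() is what makes it idempotent: the daemon command is skipped whenever the pid-file liveness check succeeds. A rough sketch of that behavior using plain subprocess in place of resource_management's Execute:

import subprocess

def execute(cmd, not_if=None):
    # Mimics Execute(..., not_if=...): skip cmd when the guard command succeeds
    if not_if is not None and subprocess.call(not_if, shell=True) == 0:
        return
    subprocess.check_call(cmd, shell=True)

# execute("path/to/kafka start",
#         not_if="ls /var/run/kafka/kafka.pid >/dev/null 2>&1 && "
#                "ps -p `cat /var/run/kafka/kafka.pid` >/dev/null 2>&1")
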
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..5b0be54
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.default import default
+from utils import get_bare_principal
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.is_empty import is_empty
+import status_params
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+stack_name = default("/hostLevelParams/stack_name", None)
+retryAble = default("/commandParams/command_retry_enabled", False)
+
+# Version being upgraded/downgraded to
+version = default("/commandParams/version", None)
+
+# Version that is CURRENT.
+current_version = default("/hostLevelParams/current_version", None)
+
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
+
+# When downgrading, 'version' and 'current_version' both point to the downgrade-target version;
+# downgrade_from_version provides the source version the downgrade is happening from
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+
+hostname = config['hostname']
+
+# default kafka parameters
+kafka_home = '/usr/lib/kafka'
+kafka_bin = kafka_home+'/bin/kafka'
+conf_dir = "/etc/kafka/conf"
+limits_conf_dir = "/etc/security/limits.d"
+
+# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
+zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)
+
+kafka_user_nofile_limit = default('/configurations/kafka-env/kafka_user_nofile_limit', 128000)
+kafka_user_nproc_limit = default('/configurations/kafka-env/kafka_user_nproc_limit', 65536)
+
+# parameters for 2.2+
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  kafka_home = os.path.join(stack_root,  "current", "kafka-broker")
+  kafka_bin = os.path.join(kafka_home, "bin", "kafka")
+  conf_dir = os.path.join(kafka_home, "config")
+
+kafka_user = config['configurations']['kafka-env']['kafka_user']
+kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
+kafka_pid_dir = status_params.kafka_pid_dir
+kafka_pid_file = kafka_pid_dir+"/kafka.pid"
+# These dirs are hardcoded by the kafka bash process launcher, over which we have no control
+kafka_managed_pid_dir = "/var/run/kafka"
+kafka_managed_log_dir = "/var/log/kafka"
+user_group = config['configurations']['cluster-env']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+kafka_env_sh_template = config['configurations']['kafka-env']['content']
+kafka_jaas_conf_template = default("/configurations/kafka_jaas_conf/content", None)
+kafka_client_jaas_conf_template = default("/configurations/kafka_client_jaas_conf/content", None)
+kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
+kafka_hosts.sort()
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
+kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
+
+#Kafka log4j
+kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
+kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)
+controller_log_maxfilesize = default('/configurations/kafka-log4j/controller_log_maxfilesize',256)
+controller_log_maxbackupindex = default('/configurations/kafka-log4j/controller_log_maxbackupindex',20)
+
+if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
+    log4j_props = config['configurations']['kafka-log4j']['content']
+else:
+    log4j_props = None
+
+if 'ganglia_server_host' in config['clusterHostInfo'] and \
+    len(config['clusterHostInfo']['ganglia_server_host'])>0:
+  ganglia_installed = True
+  ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
+  ganglia_report_interval = 60
+else:
+  ganglia_installed = False
+
+metric_collector_port = ""
+metric_collector_protocol = ""
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+set_instanceId = "false"
+cluster_name = config["clusterName"]
+
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  pass
+
+# Security-related params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
+                         ((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
+                          (config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
+
+
+if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
+  and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
+    _hostname_lowercase = config['hostname'].lower()
+    _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
+    kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
+    kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+    kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
+else:
+    kafka_kerberos_params = ''
+    kafka_jaas_principal = None
+    kafka_keytab_path = None
+
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger kafka plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger xml_configurations_supported flag: determined via stack feature instead of depending on ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
+
+# ranger kafka plugin enabled property
+enable_ranger_kafka = default("configurations/ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled", "No")
+enable_ranger_kafka = True if enable_ranger_kafka.lower() == 'yes' else False
+
+# ranger kafka-plugin supported flag: determined via stack feature instead of depending on is_supported_kafka_ranger/kafka-env.xml
+is_supported_kafka_ranger = check_stack_feature(StackFeature.KAFKA_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+
+# ranger kafka properties
+if enable_ranger_kafka and is_supported_kafka_ranger:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  # ranger kafka service/repository name
+  repo_name = str(config['clusterName']) + '_kafka'
+  repo_name_value = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_kafka:
+    external_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
+  ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
+  ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
+  ranger_kafka_security = config['configurations']['ranger-kafka-security']
+  ranger_kafka_security_attrs = config['configuration_attributes']['ranger-kafka-security']
+  ranger_kafka_policymgr_ssl = config['configurations']['ranger-kafka-policymgr-ssl']
+  ranger_kafka_policymgr_ssl_attrs = config['configuration_attributes']['ranger-kafka-policymgr-ssl']
+
+  policy_user = config['configurations']['ranger-kafka-plugin-properties']['policy_user']
+
+  ranger_plugin_config = {
+    'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+    'password' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
+    'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
+    'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
+  }
+
+  kafka_ranger_plugin_repo = {
+    'isEnabled': 'true',
+    'configs': ranger_plugin_config,
+    'description': 'kafka repo',
+    'name': repo_name,
+    'repositoryType': 'kafka',
+    'type': 'kafka',
+    'assetType': '1'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    ranger_plugin_config['policy.download.auth.users'] = kafka_user
+    ranger_plugin_config['tag.download.auth.users'] = kafka_user
+    ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{kafka_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{kafka_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = default('/configurations/ranger-kafka-audit/xasecure.audit.destination.hdfs', False)
+  ssl_keystore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  stack_version = get_stack_version('kafka-broker')
+  setup_ranger_env_sh_source = format('{stack_root}/{stack_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
+  setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# needed to capture the name of the cluster where the ranger kafka plugin is enabled
+cluster_name = config['clusterName']
+
+# ranger kafka plugin section end
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", [])
+has_namenode = len(namenode_hosts) > 0
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+import functools
+# Create a partial function that pre-fills the arguments common to every HdfsResource call;
+# code that needs to create/delete an HDFS directory/file or copy from local then simply calls params.HdfsResource.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..cb66f98
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core import sudo
+import subprocess
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    # TODO: the Kafka service check should be more robust. It should get all the broker_hosts,
+    # produce some messages and check that a consumer reads the same number of messages.
+    
+    kafka_config = self.read_kafka_config()
+    topic = "ambari_kafka_service_check"
+    create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
+    create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
+    source_cmd = format("source {conf_dir}/kafka-env.sh")
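+    # List existing topics through ZooKeeper; the output below tells us whether the check topic already exists.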
+    topic_exists_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list")
+    topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate()
+    
+    delete_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --delete --topic {topic}")
+    create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
+    if topic in topic_exists_cmd_out:
+      # delete and recreate the topic only when it already exists
+      command = source_cmd + " ; " + delete_topic_cmd + " ; " + create_topic_cmd
+    else:
+      # run create topic command 
+      command = source_cmd + " ; " + create_topic_cmd
+    Logger.info("Running kafka create topic command: %s" % command)
+    call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
+
+  def read_kafka_config(self):
+    import params
+    
+    kafka_config = {}
+    content = sudo.read_file(params.conf_dir + "/server.properties")
+    for line in content.splitlines():
+      if line.startswith("#") or not line.strip():
+        continue
+
+      # split on the first '=' only so property values containing '=' are kept intact
+      key, value = line.split("=", 1)
+      kafka_config[key] = value.replace("\n", "")
+    
+    return kafka_config
+
+if __name__ == "__main__":
+    ServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
new file mode 100644
index 0000000..e9719aa
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from resource_management.core.logger import Logger
+from resource_management.core.resources import File, Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_core_site_for_required_plugins
+
+def setup_ranger_kafka():
+  import params
+
+  if params.enable_ranger_kafka:
+
+    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retryAble:
+      Logger.info("Kafka: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Kafka: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_kafka and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
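+        # Pre-create the HDFS audit directories so the Ranger Kafka plugin can write its audit logs to HDFS.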
+        params.HdfsResource("/ranger/audit",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hdfs_user,
+                           group=params.hdfs_user,
+                           mode=0755,
+                           recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/kafka",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.kafka_user,
+                           group=params.kafka_user,
+                           mode=0700,
+                           recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+    setup_ranger_plugin('kafka-broker', 'kafka', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.kafka_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_kafka, conf_dict=params.conf_dir,
+                        component_user=params.kafka_user, component_group=params.user_group, cache_service_list=['kafka'],
+                        plugin_audit_properties=params.ranger_kafka_audit, plugin_audit_attributes=params.ranger_kafka_audit_attrs,
+                        plugin_security_properties=params.ranger_kafka_security, plugin_security_attributes=params.ranger_kafka_security_attrs,
+                        plugin_policymgr_ssl_properties=params.ranger_kafka_policymgr_ssl, plugin_policymgr_ssl_attributes=params.ranger_kafka_policymgr_ssl_attrs,
+                        component_list=['kafka-broker'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
+                        is_security_enabled = params.security_enabled,
+                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                        component_user_principal=params.kafka_jaas_principal if params.security_enabled else None,
+                        component_user_keytab=params.kafka_keytab_path if params.security_enabled else None)
+    
+    if params.enable_ranger_kafka: 
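+      # Copy kafka-ranger-env.sh into the Kafka conf dir once; the not_if guard keeps re-runs idempotent.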
+      Execute(('cp', '--remove-destination', params.setup_ranger_env_sh_source, params.setup_ranger_env_sh_target),
+        not_if=format("test -f {setup_ranger_env_sh_target}"),
+        sudo=True
+      )
+      File(params.setup_ranger_env_sh_target,
+        owner = params.kafka_user,
+        group = params.user_group,
+        mode = 0755
+      )
+    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_kafka and params.has_namenode and params.security_enabled:
+      Logger.info("Stack supports core-site.xml creation for Ranger plugin, creating create core-site.xml from namenode configuraitions")
+      setup_core_site_for_required_plugins(component_user=params.kafka_user,component_group=params.user_group,create_core_site_path = params.conf_dir, config = params.config)
+    else:
+      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
+  else:
+    Logger.info('Ranger Kafka plugin is not enabled')
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..57bdf5e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+kafka_pid_dir = config['configurations']['kafka-env']['kafka_pid_dir']
+kafka_pid_file = format("{kafka_pid_dir}/kafka.pid")
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..b6e4046
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import Direction
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+
+def run_migration(env, upgrade_type):
+  """
+  If the acl migration script is present, then run it for either upgrade or downgrade.
+  That script was introduced in HDP 2.3.4.0 and requires stopping all Kafka brokers first.
+  Requires configs to be present.
+  :param env: Environment.
+  :param upgrade_type: "rolling" or "nonrolling"
+  """
+  import params
+
+  if upgrade_type is None:
+    raise Fail('Parameter "upgrade_type" is missing.')
+
+  if params.upgrade_direction is None:
+    raise Fail('Parameter "upgrade_direction" is missing.')
+
+  if params.upgrade_direction == Direction.DOWNGRADE and params.downgrade_from_version is None:
+    raise Fail('Parameter "downgrade_from_version" is missing.')
+
+  if not params.security_enabled:
+    Logger.info("Skip running the Kafka ACL migration script since cluster security is not enabled.")
+    return
+  
+  Logger.info("Upgrade type: {0}, direction: {1}".format(str(upgrade_type), params.upgrade_direction))
+
+  # If the ACL migration script exists in the version being upgraded to, then attempt to upgrade/downgrade the ACLs while still using the present bits.
+  kafka_acls_script = None
+  command_suffix = ""
+  if params.upgrade_direction == Direction.UPGRADE:
+    kafka_acls_script = format("{stack_root}/{version}/kafka/bin/kafka-acls.sh")
+    command_suffix = "--upgradeAcls"
+  elif params.upgrade_direction == Direction.DOWNGRADE:
+    kafka_acls_script = format("{stack_root}/{downgrade_from_version}/kafka/bin/kafka-acls.sh")
+    command_suffix = "--downgradeAcls"
+
+  if kafka_acls_script is not None:
+    if os.path.exists(kafka_acls_script):
+      Logger.info("Found Kafka acls script: {0}".format(kafka_acls_script))
+      if params.zookeeper_connect is None:
+        raise Fail("Could not retrieve property kafka-broker/zookeeper.connect")
+
+      acls_command = "{0} --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect={1} {2}".\
+        format(kafka_acls_script, params.zookeeper_connect, command_suffix)
+
+      Execute(acls_command,
+              user=params.kafka_user,
+              logoutput=True)
+    else:
+      Logger.info("Did not find Kafka acls script: {0}".format(kafka_acls_script))
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/utils.py
new file mode 100644
index 0000000..2f1fa5e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/utils.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+
+def get_bare_principal(normalized_principal_name):
+    """
+    Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the
+    primary component (nimbus)
+    :param normalized_principal_name: a string containing the principal name to process
+    :return: a string containing the primary component value or None if not valid
+    """
+
+    bare_principal = None
+
+    if normalized_principal_name:
+        # group 1 is the primary component; the optional /host and @REALM parts are discarded
+        match = re.match(r"([^/@]+)(?:/[^@]*)?(?:@.*)?", normalized_principal_name)
+        if match:
+            bare_principal = match.group(1)
+
+    return bare_principal
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/input.config-kafka.json.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/input.config-kafka.json.j2
new file mode 100644
index 0000000..5b8f896
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/input.config-kafka.json.j2
@@ -0,0 +1,92 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"kafka_controller",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/controller.log"
+    },
+    {
+      "type":"kafka_request",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/kafka-request.log"
+    },
+    {
+      "type":"kafka_logcleaner",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/log-cleaner.log"
+    },
+    {
+      "type":"kafka_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/server.log"
+    },
+    {
+      "type":"kafka_statechange",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/state-change.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "kafka_controller",
+            "kafka_request",
+            "kafka_logcleaner"
+          ]
+        }
+      },
+      "log4j_format":"[%d] %p %m (%c)%n",
+      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "comment":"Suppose to be same log4j pattern as other kafka processes, but some reason thread is not printed",
+      "conditions":{
+        "fields":{
+          "type":[
+            "kafka_server",
+            "kafka_statechange"
+          ]
+        }
+      },
+      "log4j_format":"[%d] %p %m (%c)%n",
+      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka.conf.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka.conf.j2
new file mode 100644
index 0000000..9e18e1d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{kafka_user}}   - nofile   {{kafka_user_nofile_limit}}
+{{kafka_user}}   - nproc    {{kafka_user_nproc_limit}}
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_client_jaas.conf.j2
new file mode 100644
index 0000000..7f81d85
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_client_jaas.conf.j2
@@ -0,0 +1,29 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="zookeeper";
+};
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_jaas.conf.j2
new file mode 100644
index 0000000..56c558d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/kafka_jaas.conf.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+KafkaServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{kafka_jaas_principal}}";
+};
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{kafka_jaas_principal}}";
+};
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/tools-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/tools-log4j.properties.j2
new file mode 100644
index 0000000..c4ad326
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/templates/tools-log4j.properties.j2
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=WARN, stderr
+
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
+log4j.appender.stderr.Target=System.err
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/role_command_order.json
new file mode 100644
index 0000000..9a52922
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for KAFKA",
+    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "NAMENODE-START"],
+    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/service_advisor.py
new file mode 100644
index 0000000..6b889a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/service_advisor.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
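+# Load the parent service_advisor module from the stacks directory so this service can subclass and extend it.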
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class KafkaServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KafkaServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    the host index where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overriden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = KafkaRecommender()
+    recommender.recommendKafkaConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+    recommender.recommendKAFKAConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendKAFKAConfigurationsFromHDP26(configurations, clusterData, services, hosts)
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = KafkaValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class KafkaRecommender(service_advisor.ServiceAdvisor):
+  """
+  Kafka Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KafkaRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+
+  def recommendKafkaConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    kafka_mounts = [
+      ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+    ]
+
+    self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
+
+
+  def recommendKAFKAConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    kafka_broker = self.getServicesSiteProperties(services, "kafka-broker")
+
+    security_enabled = self.isSecurityEnabled(services)
+
+    putKafkaBrokerProperty = self.putProperty(configurations, "kafka-broker", services)
+    putKafkaLog4jProperty = self.putProperty(configurations, "kafka-log4j", services)
+    putKafkaBrokerAttributes = self.putPropertyAttribute(configurations, "kafka-broker")
+
+    if security_enabled:
+      kafka_env = self.getServicesSiteProperties(services, "kafka-env")
+      kafka_user = kafka_env.get('kafka_user') if kafka_env is not None else None
+
+      if kafka_user is not None:
+        kafka_super_users = kafka_broker.get('super.users') if kafka_broker is not None else None
+
+        # kafka_super_users is expected to be formatted as:  User:user1;User:user2
+        if kafka_super_users is not None and kafka_super_users != '':
+          # Parse kafka_super_users to get a set of unique user names and rebuild the property value
+          user_names = set()
+          user_names.add(kafka_user)
+          for match in re.findall('User:([^;]*)', kafka_super_users):
+            user_names.add(match)
+          kafka_super_users = 'User:' + ";User:".join(user_names)
+        else:
+          kafka_super_users = 'User:' + kafka_user
+
+        putKafkaBrokerProperty("super.users", kafka_super_users)
+
+      putKafkaBrokerProperty("principal.to.local.class", "kafka.security.auth.KerberosPrincipalToLocal")
+      putKafkaBrokerProperty("security.inter.broker.protocol", "PLAINTEXTSASL")
+      putKafkaBrokerProperty("zookeeper.set.acl", "true")
+
+    else:  # not security_enabled
+      # remove unneeded properties
+      putKafkaBrokerAttributes('super.users', 'delete', 'true')
+      putKafkaBrokerAttributes('principal.to.local.class', 'delete', 'true')
+      putKafkaBrokerAttributes('security.inter.broker.protocol', 'delete', 'true')
+
+    # Update ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled to match ranger-env/ranger-kafka-plugin-enabled
+    if "ranger-env" in services["configurations"] \
+      and "ranger-kafka-plugin-properties" in services["configurations"] \
+      and "ranger-kafka-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putKafkaRangerPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
+      ranger_kafka_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
+      putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", ranger_kafka_plugin_enabled)
+
+
+    ranger_plugin_enabled = False
+    # Only if the RANGER service is installed....
+    if "RANGER" in servicesList:
+      # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled was changed,
+      # determine whether the Ranger/Kafka plug-in is enabled or not
+      if 'ranger-kafka-plugin-properties' in configurations and \
+                      'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
+        ranger_plugin_enabled = configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
+      # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled was not changed,
+      # determine whether the Ranger/Kafka plug-in is enabled or not
+      elif 'ranger-kafka-plugin-properties' in services['configurations'] and \
+                      'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+        ranger_plugin_enabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
+
+    # Determine the value for kafka-broker/authorizer.class.name
+    if ranger_plugin_enabled:
+      # If the Ranger plugin for Kafka is enabled, set authorizer.class.name to
+      # "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer" whether Kerberos is
+      # enabled or not.
+      putKafkaBrokerProperty("authorizer.class.name", 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer')
+    elif security_enabled:
+      putKafkaBrokerProperty("authorizer.class.name", 'kafka.security.auth.SimpleAclAuthorizer')
+    else:
+      putKafkaBrokerAttributes('authorizer.class.name', 'delete', 'true')
+
+    #If AMS is part of Services, use the KafkaTimelineMetricsReporter for metric reporting. Default is ''.
+    if "AMBARI_METRICS" in servicesList:
+      putKafkaBrokerProperty('kafka.metrics.reporters', 'org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter')
+
+    if ranger_plugin_enabled:
+      kafkaLog4jRangerLines = [{
+                                 "name": "log4j.appender.rangerAppender",
+                                 "value": "org.apache.log4j.DailyRollingFileAppender"
+                               },
+                               {
+                                 "name": "log4j.appender.rangerAppender.DatePattern",
+                                 "value": "'.'yyyy-MM-dd-HH"
+                               },
+                               {
+                                 "name": "log4j.appender.rangerAppender.File",
+                                 "value": "${kafka.logs.dir}/ranger_kafka.log"
+                               },
+                               {
+                                 "name": "log4j.appender.rangerAppender.layout",
+                                 "value": "org.apache.log4j.PatternLayout"
+                               },
+                               {
+                                 "name": "log4j.appender.rangerAppender.layout.ConversionPattern",
+                                 "value": "%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n"
+                               },
+                               {
+                                 "name": "log4j.logger.org.apache.ranger",
+                                 "value": "INFO, rangerAppender"
+                               }]
+
+      # change kafka-log4j when ranger plugin is installed
+      if 'kafka-log4j' in services['configurations'] and 'content' in services['configurations']['kafka-log4j']['properties']:
+        kafkaLog4jContent = services['configurations']['kafka-log4j']['properties']['content']
+        for item in kafkaLog4jRangerLines:
+          if item["name"] not in kafkaLog4jContent:
+            kafkaLog4jContent += '\n' + item["name"] + '=' + item["value"]
+        putKafkaLog4jProperty("content",kafkaLog4jContent)
+
+
+  def recommendKAFKAConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
+    if 'kafka-env' in services['configurations'] and 'kafka_user' in services['configurations']['kafka-env']['properties']:
+      kafka_user = services['configurations']['kafka-env']['properties']['kafka_user']
+    else:
+      kafka_user = "kafka"
+
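+    # Prefer the in-flight recommended configurations; fall back to the stored service configurations.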
+    if 'ranger-kafka-plugin-properties' in configurations and  'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
+      ranger_kafka_plugin_enabled = (configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes')
+    elif 'ranger-kafka-plugin-properties' in services['configurations'] and 'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+      ranger_kafka_plugin_enabled = (services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes')
+    else:
+      ranger_kafka_plugin_enabled = False
+
+    if ranger_kafka_plugin_enabled and 'ranger-kafka-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+      self.logger.info("Setting Kafka Repo user for Ranger.")
+      putRangerKafkaPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
+      putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME",kafka_user)
+    else:
+      self.logger.info("Not setting Kafka Repo user for Ranger.")
+
+
+class KafkaValidator(service_advisor.ServiceAdvisor):
+  """
+  Kafka Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KafkaValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("ranger-kafka-plugin-properties", self.validateKafkaRangerPluginConfigurationsFromHDP22),
+                       ("kafka-broker", self.validateKAFKAConfigurationsFromHDP23)]
+
+  def validateKafkaRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-kafka-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    security_enabled = self.isSecurityEnabled(services)
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # ranger-kafka-plugin must be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-kafka-plugin-enabled' in ranger_env or \
+                      ranger_env['ranger-kafka-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-kafka-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled must correspond ranger-env/ranger-kafka-plugin-enabled")})
+
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'yes') and not security_enabled:
+      validationItems.append({"config-name": "ranger-kafka-plugin-enabled",
+                              "item": self.getWarnItem(
+                                "Ranger Kafka plugin should not be enabled in non-kerberos environment.")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-kafka-plugin-properties")
+
+
+  def validateKAFKAConfigurationsFromHDP23(self, properties, recommendedDefaults, configurations, services, hosts):
+    kafka_broker = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-kafka-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
+    prop_name = 'authorizer.class.name'
+    prop_val = "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer"
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if kafka_broker[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Ranger Kafka Plugin is enabled." \
+                                  "{0} needs to be set to {1}".format(prop_name,prop_val))})
+
+    if 'KERBEROS' in servicesList and 'security.inter.broker.protocol' in properties:
+      interBrokerValue = properties['security.inter.broker.protocol']
+      prop_name = 'listeners'
+      prop_value =  properties[prop_name]
+      if interBrokerValue and interBrokerValue not in prop_value:
+        validationItems.append({"config-name": "listeners",
+                                "item": self.getWarnItem("If kerberos is enabled " \
+                                                         "{0}  need to contain {1} as one of " \
+                                                         "the protocol".format(prop_name, interBrokerValue))})
+
+
+    return self.toConfigurationValidationProblems(validationItems, "kafka-broker")
+
+
+
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/widgets.json b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/widgets.json
new file mode 100644
index 0000000..d513075
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/widgets.json
@@ -0,0 +1,182 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_kafka_dashboard",
+      "display_name": "Standard Kafka Dashboard",
+      "section_name": "KAFKA_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Broker Topics",
+          "description": "Broker Topics",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate",
+              "metric_path": "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/1MinuteRate",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            },
+            {
+              "name": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate",
+              "metric_path": "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/1MinuteRate",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            },
+            {
+              "name": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate",
+              "metric_path": "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/1MinuteRate",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Bytes In",
+              "value": "${kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate}"
+            },
+            {
+              "name": "Bytes Out",
+              "value": "${kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate}"
+            },
+            {
+              "name": "Messages In",
+              "value": "${kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Active Controller Count",
+          "description": "Active Controller Count",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "kafka.controller.KafkaController.ActiveControllerCount._sum",
+              "metric_path": "metrics/kafka/controller/KafkaController/ActiveControllerCount._sum",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Active Controller Count",
+              "value": "${kafka.controller.KafkaController.ActiveControllerCount._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Controller Status",
+          "description": "Controller Status",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate",
+              "metric_path": "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/1MinuteRate",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            },
+            {
+              "name": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate",
+              "metric_path": "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/1MinuteRate",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Leader Election Rate And Time",
+              "value": "${kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate}"
+            },
+            {
+              "name": "Unclean Leader Election",
+              "value": "${kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Replica MaxLag",
+          "description": "Replica MaxLag",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica",
+              "metric_path": "metrics/kafka/server/ReplicaFetcherManager/Replica-MaxLag",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Replica MaxLag",
+              "value": "${kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Replica Manager",
+          "description": "Replica Manager",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "kafka.server.ReplicaManager.PartitionCount._sum",
+              "metric_path": "metrics/kafka/server/ReplicaManager/PartitionCount._sum",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            },
+            {
+              "name": "kafka.server.ReplicaManager.UnderReplicatedPartitions",
+              "metric_path": "metrics/kafka/server/ReplicaManager/UnderReplicatedPartitions",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            },
+            {
+              "name": "kafka.server.ReplicaManager.LeaderCount._sum",
+              "metric_path": "metrics/kafka/server/ReplicaManager/LeaderCount._sum",
+              "service_name": "KAFKA",
+              "component_name": "KAFKA_BROKER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Partitions count",
+              "value": "${kafka.server.ReplicaManager.PartitionCount._sum}"
+            },
+            {
+              "name": "Under Replicated Partitions",
+              "value": "${kafka.server.ReplicaManager.UnderReplicatedPartitions}"
+            },
+            {
+              "name": "Leader Count",
+              "value": "${kafka.server.ReplicaManager.LeaderCount._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index 96b7750..39dfeba 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -50,8 +50,7 @@
       Zookeeper also allows you to add a "chroot" path which will make all kafka data for this cluster appear under a particular path.
       This is a way to setup multiple Kafka clusters or other applications on the same zookeeper cluster. To do this give a connection
      string in the form hostname1:port1,hostname2:port2,hostname3:port3/chroot/path which would put all this cluster's data under the
-      path /chroot/path. Note that you must create this path yourself prior to starting the broker and consumers must use the
-      same connection string.
+      path /chroot/path. Note that consumers must use the same connection string.
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -158,6 +157,15 @@
     <value>10</value>
     <description>The frequency in minutes that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
     </description>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>log.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>
+      The frequency in milliseconds that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -404,4 +412,15 @@
     <description>Timeline metrics reporter send interval</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation</name>
+    <value>{{host_in_memory_aggregation}}</value>
+    <description>If set to "true", host metrics will be aggregated in memory on each host.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation_port</name>
+    <value>{{host_in_memory_aggregation_port}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
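Note: the kafka-broker.xml change above retires log.retention.check.interval.minutes (marked deleted) in favor of log.retention.check.interval.ms, and the new default of 600000 ms preserves the old 10-minute default exactly. The conversion, shown as a standalone sketch rather than Ambari's actual upgrade code:

    def minutes_to_ms(minutes):
        # 10 minutes -> 600000 ms, matching the defaults above.
        return int(minutes) * 60 * 1000

    assert minutes_to_ms(10) == 600000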
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-logsearch-conf.xml
deleted file mode 100644
index 29a8b36..0000000
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-logsearch-conf.xml
+++ /dev/null
@@ -1,124 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Kafka</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>KAFKA_BROKER:kafka_server,kafka_request,kafka_logcleaner,kafka_controller,kafka_statechange</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"kafka_controller",
-      "rowtype":"service",
-      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/controller.log"
-    },
-    {
-      "type":"kafka_request",
-      "rowtype":"service",
-      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/kafka-request.log"
-    },
-    {
-      "type":"kafka_logcleaner",
-      "rowtype":"service",
-      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/log-cleaner.log"
-    },
-    {
-      "type":"kafka_server",
-      "rowtype":"service",
-      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/server.log"
-    },
-    {
-      "type":"kafka_statechange",
-      "rowtype":"service",
-      "path":"{{kafka_log_dir}}/state-change.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "kafka_controller",
-            "kafka_request",
-            "kafka_logcleaner"
-          ]
-         }
-       },
-      "log4j_format":"[%d] %p %m (%c)%n",
-      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
-      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "comment":"Suppose to be same log4j pattern as other kafka processes, but some reason thread is not printed",
-      "conditions":{
-        "fields":{
-          "type":[
-            "kafka_server",
-            "kafka_statechange"
-          ]
-         }
-       },
-      "log4j_format":"[%d] %p %m (%c)%n",
-      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
-      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
index 96a8293..81715f9 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka_broker.py
@@ -77,6 +77,12 @@
     import params
     env.set_params(params)
     self.configure(env, upgrade_type=upgrade_type)
+
+    if params.security_enabled:
+      if params.version and check_stack_feature(StackFeature.KAFKA_KERBEROS, params.version):
+        kafka_kinit_cmd = format("{kinit_path_local} -kt {kafka_keytab_path} {kafka_jaas_principal};")
+        Execute(kafka_kinit_cmd, user=params.kafka_user)
+
     if params.is_supported_kafka_ranger:
       setup_ranger_kafka() #Ranger Kafka Plugin related call 
     daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
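Note: the kafka_broker.py hunk above obtains a Kerberos ticket for the Kafka service user before starting the broker, but only when security is enabled and the stack version supports Kerberized Kafka. A standalone sketch of the command it builds (the path and principal below are illustrative placeholders, not values from this diff):

    # Illustrative stand-ins for the values params.py would provide.
    kinit_path_local = "/usr/bin/kinit"
    kafka_keytab_path = "/etc/security/keytabs/kafka.service.keytab"
    kafka_jaas_principal = "kafka/broker-1.example.com@EXAMPLE.COM"

    kafka_kinit_cmd = "{0} -kt {1} {2};".format(
        kinit_path_local, kafka_keytab_path, kafka_jaas_principal)
    print(kafka_kinit_cmd)
    # /usr/bin/kinit -kt /etc/security/keytabs/kafka.service.keytab kafka/broker-1.example.com@EXAMPLE.COM;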
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 32f18f2..9acc1ef 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -130,13 +130,22 @@
 metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
 metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
 
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+set_instanceId = "false"
+cluster_name = config["clusterName"]
+
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
 has_metric_collector = not len(ams_collector_hosts) == 0
 
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -147,6 +156,9 @@
     metric_collector_protocol = 'https'
   else:
     metric_collector_protocol = 'http'
+
+  host_in_memory_aggregation = str(default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)).lower()
+  host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
   pass
 
 # Security-related params
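Note: the params.py hunk above renames the cluster-env override properties (metrics_collector_vip_port becomes metrics_collector_external_port) and prefers externally managed collector hosts when configured, falling back to the hosts Ambari discovered. A minimal sketch of that selection logic, assuming the command-JSON layout shown in the diff:

    def resolve_collector_hosts(config):
        # Prefer an externally configured collector; else use discovered hosts.
        cluster_env = config.get("configurations", {}).get("cluster-env", {})
        if "metrics_collector_external_hosts" in cluster_env:
            return cluster_env["metrics_collector_external_hosts"], "true"
        hosts = config.get("clusterHostInfo", {}).get("metrics_collector_hosts", [])
        return ",".join(hosts), "false"

    hosts, set_instanceId = resolve_collector_hosts(
        {"clusterHostInfo": {"metrics_collector_hosts": ["c1.example.com"]}})
    print(hosts, set_instanceId)  # c1.example.com false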
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/service_check.py
index 0f3a417..cb66f98 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/service_check.py
@@ -40,12 +40,17 @@
     topic_exists_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list")
     topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate()
-    # run create topic command only if the topic doesn't exists
-    if topic not in topic_exists_cmd_out:
-      create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
+
+    delete_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --delete --topic {topic}")
+    create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
+    if topic in topic_exists_cmd_out:
+      # the topic already exists, so delete it and recreate it
+      command = source_cmd + " ; " + delete_topic_cmd + ";" + create_topic_cmd
+    else:
+      # the topic does not exist, so just create it
       command = source_cmd + " ; " + create_topic_cmd
-      Logger.info("Running kafka create topic command: %s" % command)
-      call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
+    Logger.info("Running kafka create topic command: %s" % command)
+    call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
 
   def read_kafka_config(self):
     import params
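Note: the service_check.py hunk above changes the smoke test from "create only if missing" to "delete and recreate if present", so the check always exercises topic creation. A sketch of the resulting command strings, with illustrative stand-ins for the values read from params and the broker config:

    kafka_home = "/usr/hdp/current/kafka-broker"      # illustrative
    zk_connect = "zk1.example.com:2181"               # illustrative
    topic = "ambari_kafka_service_check"              # illustrative

    delete_topic_cmd = ("{0}/bin/kafka-topics.sh --zookeeper {1} "
                        "--delete --topic {2}").format(kafka_home, zk_connect, topic)
    create_topic_cmd = ("{0}/bin/kafka-topics.sh --zookeeper {1} --create "
                        "--topic {2} --partitions 1 --replication-factor 1"
                        ).format(kafka_home, zk_connect, topic)

    def build_command(source_cmd, topic_exists):
        # Same branching as the patched check: recreate when already present.
        if topic_exists:
            return source_cmd + " ; " + delete_topic_cmd + ";" + create_topic_cmd
        return source_cmd + " ; " + create_topic_cmd

The delete path only takes effect because delete.topic.enable is flipped to true in the 0.9.0 kafka-broker.xml later in this diff.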
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/templates/input.config-kafka.json.j2 b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/templates/input.config-kafka.json.j2
new file mode 100644
index 0000000..5b8f896
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/templates/input.config-kafka.json.j2
@@ -0,0 +1,92 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"kafka_controller",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/controller.log"
+    },
+    {
+      "type":"kafka_request",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/kafka-request.log"
+    },
+    {
+      "type":"kafka_logcleaner",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/log-cleaner.log"
+    },
+    {
+      "type":"kafka_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/server.log"
+    },
+    {
+      "type":"kafka_statechange",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kafka-env/kafka_log_dir', '/var/log/kafka')}}/state-change.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "kafka_controller",
+            "kafka_request",
+            "kafka_logcleaner"
+          ]
+        }
+      },
+      "log4j_format":"[%d] %p %m (%c)%n",
+      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "comment":"Suppose to be same log4j pattern as other kafka processes, but some reason thread is not printed",
+      "conditions":{
+        "fields":{
+          "type":[
+            "kafka_server",
+            "kafka_statechange"
+          ]
+        }
+      },
+      "log4j_format":"[%d] %p %m (%c)%n",
+      "multiline_pattern":"^(\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
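Note: the grok patterns in the template above are built from macros such as %{TIMESTAMP_ISO8601:logtime}. As a rough check of what the kafka_server/kafka_statechange pattern captures, here is a sketch using simplified regex approximations of those macros (the real grok definitions are broader):

    import re

    # Simplified approximations of the grok macros used above.
    TIMESTAMP_ISO8601 = r"\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}(?:,\d{3})?"
    LOGLEVEL = r"(?:TRACE|DEBUG|INFO|WARN|ERROR|FATAL)"

    pattern = re.compile(
        r"^\[(?P<logtime>" + TIMESTAMP_ISO8601 + r")\]\s+"
        r"(?P<level>" + LOGLEVEL + r")\s+(?P<log_message>.*)$")

    line = "[2017-06-01 12:00:00,123] INFO starting (kafka.server.KafkaServer)"
    m = pattern.match(line)
    print(m.group("logtime"), m.group("level"))  # 2017-06-01 12:00:00,123 INFO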
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
index 0275358..73a5eff 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
@@ -126,7 +126,7 @@
   </property>
   <property>
     <name>delete.topic.enable</name>
-    <value>false</value>
+    <value>true</value>
     <description>Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off</description>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
index c50c67b..39fdcf5 100644
--- a/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
+++ b/ambari-server/src/main/resources/common-services/KERBEROS/1.10.3-10/package/scripts/kerberos_client.py
@@ -43,27 +43,6 @@
   def status(self, env):
     raise ClientComponentHasNoStatus()
 
-  def security_status(self, env):
-    import status_params
-    if status_params.security_enabled:
-      if status_params.smoke_user and status_params.smoke_user_keytab:
-        try:
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.smoke_user,
-                                status_params.smoke_user_keytab,
-                                status_params.smoke_user_principal,
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNKNOWN"})
-        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def set_keytab(self, env):
     self.write_keytab_file()
 
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/gateway-site.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/gateway-site.xml
index ad599e0..51036a0 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/gateway-site.xml
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/gateway-site.xml
@@ -58,7 +58,7 @@
   </property>
   <property>
     <name>sun.security.krb5.debug</name>
-    <value>true</value>
+    <value>false</value>
     <description>Boolean flag indicating whether to enable debug messages for krb5 authentication</description>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/knox-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/knox-logsearch-conf.xml
deleted file mode 100644
index 528b011..0000000
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/knox-logsearch-conf.xml
+++ /dev/null
@@ -1,93 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Knox</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>KNOX_GATEWAY:knox_gateway,knox_cli,knox_ldap</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"knox_gateway",
-      "rowtype":"service",
-      "path":"/var/log/knox/gateway.log"
-    },
-    {
-      "type":"knox_cli",
-      "rowtype":"service",
-      "path":"/var/log/knox/knoxcli.log"
-    },
-    {
-      "type":"knox_ldap",
-      "rowtype":"service",
-      "path":"/var/log/knox/ldap.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "knox_gateway",
-            "knox_cli",
-            "knox_ldap"
-          ]
-
-        }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
index 31e54e5..8996d23 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
@@ -202,67 +202,6 @@
     File(params.ldap_pid_file,
       action = "delete"
     )
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations(
-        'krb5JAASLogin',
-        None,
-        ['keytab', 'principal'],
-        None
-      ))
-      expectations.update(build_expectations(
-        'gateway-site',
-        {
-          "gateway.hadoop.kerberos.secured" : "true"
-        },
-        None,
-        None
-      ))
-
-      security_params = {
-        "krb5JAASLogin":
-          {
-            'keytab': status_params.knox_keytab_path,
-            'principal': status_params.knox_principal_name
-          }
-      }
-      security_params.update(get_params_from_filesystem(status_params.knox_conf_dir,
-        {"gateway-site.xml" : FILE_TYPE_XML}))
-
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'krb5JAASLogin' not in security_params
-               or 'keytab' not in security_params['krb5JAASLogin']
-               or 'principal' not in security_params['krb5JAASLogin']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file and principal are not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.knox_user,
-                                security_params['krb5JAASLogin']['keytab'],
-                                security_params['krb5JAASLogin']['principal'],
-                                status_params.hostname,
-                                status_params.temp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/templates/input.config-knox.json.j2 b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/templates/input.config-knox.json.j2
new file mode 100644
index 0000000..6d7cf72
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/templates/input.config-knox.json.j2
@@ -0,0 +1,60 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"knox_gateway",
+      "rowtype":"service",
+      "path":"/var/log/knox/gateway.log"
+    },
+    {
+      "type":"knox_cli",
+      "rowtype":"service",
+      "path":"/var/log/knox/knoxcli.log"
+    },
+    {
+      "type":"knox_ldap",
+      "rowtype":"service",
+      "path":"/var/log/knox/ldap.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "knox_gateway",
+            "knox_cli",
+            "knox_ldap"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json
new file mode 100644
index 0000000..4986e04
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KNOX": {
+    "service": [],
+    "KNOX_GATEWAY": [
+      {
+        "name": "knox_gateway_process",
+        "label": "Knox Gateway Process",
+        "description": "This host-level alert is triggered if the Knox Gateway cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{gateway-site/gateway.port}}",
+          "default_port": 8443,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
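Note: per the reporting block above, the knox_gateway_process alert is OK when the TCP connect completes in under 1.5 s, WARNING from 1.5 s up to 5 s, and CRITICAL at 5 s or on connection failure. A sketch of that threshold mapping (the function is illustrative; the actual evaluation happens inside Ambari's alert framework):

    def classify_response(seconds, warning=1.5, critical=5.0):
        # Map a TCP connect time to the alert states defined above.
        if seconds >= critical:
            return "CRITICAL"
        if seconds >= warning:
            return "WARNING"
        return "OK"

    assert classify_response(0.2) == "OK"
    assert classify_response(2.0) == "WARNING"
    assert classify_response(6.0) == "CRITICAL"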
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml
new file mode 100644
index 0000000..3030364
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+  <property>
+    <name>content</name>
+    <display-name>admin-topology template</display-name>
+    <value>
+    &lt;topology&gt;
+
+        &lt;gateway&gt;
+
+             &lt;provider&gt;
+                &lt;role&gt;authentication&lt;/role&gt;
+                &lt;name&gt;ShiroProvider&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;param&gt;
+                    &lt;name&gt;sessionTimeout&lt;/name&gt;
+                    &lt;value&gt;30&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm&lt;/name&gt;
+                    &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+                    &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+                    &lt;value&gt;ldap://{{knox_host_name}}:33389&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+                    &lt;value&gt;simple&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;urls./**&lt;/name&gt;
+                    &lt;value&gt;authcBasic&lt;/value&gt;
+                &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+                &lt;role&gt;authorization&lt;/role&gt;
+                &lt;name&gt;AclsAuthz&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;param&gt;
+                    &lt;name&gt;knox.acl&lt;/name&gt;
+                    &lt;value&gt;admin;*;*&lt;/value&gt;
+                &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+                &lt;role&gt;identity-assertion&lt;/role&gt;
+                &lt;name&gt;Default&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;/provider&gt;
+
+        &lt;/gateway&gt;
+
+        &lt;service&gt;
+            &lt;role&gt;KNOX&lt;/role&gt;
+        &lt;/service&gt;
+
+    &lt;/topology&gt;
+
+    </value>
+    <description>
+        This configuration specifies the Knox admin API topology and access details. The authentication provider should be configured to match your deployment details.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml
new file mode 100644
index 0000000..6408f99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+  <property>
+    <name>knox_gateway_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Knox Gateway Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_gateway_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Knox Gateway Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>gateway-log4j template</display-name>
+    <value>
+
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      app.log.dir=${launcher.dir}/../logs
+      app.log.file=${launcher.name}.log
+      app.audit.file=${launcher.name}-audit.log
+
+      log4j.rootLogger=ERROR, drfa
+
+      log4j.logger.org.apache.hadoop.gateway=INFO
+      #log4j.logger.org.apache.hadoop.gateway=DEBUG
+
+      #log4j.logger.org.eclipse.jetty=DEBUG
+      #log4j.logger.org.apache.shiro=DEBUG
+      #log4j.logger.org.apache.http=DEBUG
+      #log4j.logger.org.apache.http.client=DEBUG
+      #log4j.logger.org.apache.http.headers=DEBUG
+      #log4j.logger.org.apache.http.wire=DEBUG
+
+      log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+      log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+      log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+      log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB
+      log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}
+
+      log4j.logger.audit=INFO, auditfile
+      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}
+      log4j.appender.auditfile.Append = true
+      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd
+      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout
+
+    </value>
+    <description>
+      Content for the log4j.properties file for Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
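Note: the template above leaves {{knox_gateway_log_maxfilesize}} and {{knox_gateway_log_maxbackupindex}} as placeholders that are filled in from the sibling properties when the config is rendered on the host. A minimal stand-in for that substitution step (not Ambari's actual template pipeline):

    import re

    def render(template, params):
        # Replace {{name}} placeholders with the configured property values.
        return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), template)

    print(render("log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB",
                 {"knox_gateway_log_maxfilesize": 256}))
    # -> log4j.appender.drfa.MaxFileSize = 256MB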
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml
new file mode 100644
index 0000000..2686dff
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<!-- The default settings for Knox. -->
+<!-- Edit gateway-site.xml to change settings for your local -->
+<!-- install. -->
+<configuration supports_final="false">
+  <property>
+    <name>gateway.port</name>
+    <value>8443</value>
+    <description>The HTTP port for the Gateway.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.path</name>
+    <value>gateway</value>
+    <description>The default context path for the gateway.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.gateway.conf.dir</name>
+    <value>deployments</value>
+    <description>The directory within GATEWAY_HOME that contains gateway topology files and deployments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.hadoop.kerberos.secured</name>
+    <value>false</value>
+    <description>Boolean flag indicating whether the Hadoop cluster protected by Gateway is secured with Kerberos</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>java.security.krb5.conf</name>
+    <value>/etc/knox/conf/krb5.conf</value>
+    <description>Absolute path to krb5.conf file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>java.security.auth.login.config</name>
+    <value>/etc/knox/conf/krb5JAASLogin.conf</value>
+    <description>Absolute path to JAAS login config file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>sun.security.krb5.debug</name>
+    <value>false</value>
+    <description>Boolean flag indicating whether to enable debug messages for krb5 authentication</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.websocket.feature.enabled</name>
+    <value>{{websocket_support}}</value>
+    <description>Enable this if you want websocket support</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml
new file mode 100644
index 0000000..e1ca45a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- knox-env.sh -->
+  <property require-input="true">
+    <name>knox_master_secret</name>
+    <value/>
+    <display-name>Knox Master Secret</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>password to use as the master secret</description>
+    <value-attributes>
+      <type>password</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_user</name>
+    <display-name>Knox User</display-name>
+    <value>knox</value>
+    <property-type>USER</property-type>
+    <description>Knox Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_group</name>
+    <display-name>Knox Group</display-name>
+    <value>knox</value>
+    <property-type>GROUP</property-type>
+    <description>Knox Group.</description>
+    <value-attributes>
+      <type>user</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_pid_dir</name>
+    <value>/var/run/knox</value>
+    <display-name>Knox PID dir</display-name>
+    <description>Knox PID dir.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_principal_name</name>
+    <description>Knox principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_keytab_path</name>
+    <description>Knox keytab path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml
new file mode 100644
index 0000000..1ea8601
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+    <property>
+        <name>content</name>
+        <display-name>knoxsso-topology template</display-name>
+        <value>
+            &lt;topology&gt;
+            &lt;gateway&gt;
+            &lt;provider&gt;
+            &lt;role&gt;webappsec&lt;/role&gt;
+            &lt;name&gt;WebAppSec&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;param&gt;&lt;name&gt;xframe.options.enabled&lt;/name&gt;&lt;value&gt;true&lt;/value&gt;&lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+            &lt;role&gt;authentication&lt;/role&gt;
+            &lt;name&gt;ShiroProvider&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;param&gt;
+            &lt;name&gt;sessionTimeout&lt;/name&gt;
+            &lt;value&gt;30&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;redirectToUrl&lt;/name&gt;
+            &lt;value&gt;/gateway/knoxsso/knoxauth/login.html&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;restrictedCookies&lt;/name&gt;
+            &lt;value&gt;rememberme,WWW-Authenticate&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm&lt;/name&gt;
+            &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapContextFactory&lt;/name&gt;
+            &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory&lt;/name&gt;
+            &lt;value&gt;$ldapContextFactory&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+            &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+            &lt;value&gt;ldap://localhost:33389&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.authenticationCachingEnabled&lt;/name&gt;
+            &lt;value&gt;false&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+            &lt;value&gt;simple&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;urls./**&lt;/name&gt;
+            &lt;value&gt;authcBasic&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+            &lt;role&gt;identity-assertion&lt;/role&gt;
+            &lt;name&gt;Default&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;/provider&gt;
+            &lt;/gateway&gt;
+
+            &lt;application&gt;
+            &lt;name&gt;knoxauth&lt;/name&gt;
+            &lt;/application&gt;
+
+            &lt;service&gt;
+            &lt;role&gt;KNOXSSO&lt;/role&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.cookie.secure.only&lt;/name&gt;
+            &lt;value&gt;false&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.token.ttl&lt;/name&gt;
+            &lt;value&gt;30000&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.redirect.whitelist.regex&lt;/name&gt;
+            &lt;value&gt;^https?:\/\/(localhost|127\.0\.0\.1|0:0:0:0:0:0:0:1|::1):[0-9].*$&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;/service&gt;
+
+            &lt;/topology&gt;
+        </value>
+        <description>
+            The configuration specifies the KnoxSSO provider integration, cookie and token management details.
+        </description>
+        <value-attributes>
+            <type>content</type>
+            <empty-value-valid>true</empty-value-valid>
+            <show-property-name>false</show-property-name>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
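Note: knoxsso.redirect.whitelist.regex above restricts post-login redirects to loopback origins. A quick sketch exercising the exact regex from the value (copied verbatim into Python):

    import re

    whitelist = re.compile(
        r"^https?:\/\/(localhost|127\.0\.0\.1|0:0:0:0:0:0:0:1|::1):[0-9].*$")

    assert whitelist.match("http://localhost:8443/gateway/knoxsso")
    assert not whitelist.match("https://evil.example.com:8443/")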
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml
new file mode 100644
index 0000000..57e156c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+  <property>
+    <name>knox_ldap_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Knox LDAP Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_ldap_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Knox LDAP Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>ldap-log4j template</display-name>
+    <value>
+        # Licensed to the Apache Software Foundation (ASF) under one
+        # or more contributor license agreements.  See the NOTICE file
+        # distributed with this work for additional information
+        # regarding copyright ownership.  The ASF licenses this file
+        # to you under the Apache License, Version 2.0 (the
+        # "License"); you may not use this file except in compliance
+        # with the License.  You may obtain a copy of the License at
+        #
+        #     http://www.apache.org/licenses/LICENSE-2.0
+        #
+        # Unless required by applicable law or agreed to in writing, software
+        # distributed under the License is distributed on an "AS IS" BASIS,
+        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        # See the License for the specific language governing permissions and
+        # limitations under the License.
+
+        app.log.dir=${launcher.dir}/../logs
+        app.log.file=${launcher.name}.log
+
+        log4j.rootLogger=ERROR, drfa
+        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO
+        log4j.logger.org.apache.directory=WARN
+
+        log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+        log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+        log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB
+        log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}
+
+    </value>
+    <description>
+      Content for the log4j.properties file for the demo LDAP that ships with Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml
new file mode 100644
index 0000000..f3a0f99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to, make sure the service user has required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/knox/audit/hdfs/spool</value>
+    <description>/var/log/knox/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/knox/audit/solr/spool</value>
+    <description>Local directory used to spool Solr audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.knox.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster on which the Ranger Knox plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml
new file mode 100644
index 0000000..d8b9d54
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for KNOX</display-name>
+    <description>This user must be a system user and must also exist in the Ranger Admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-knox-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for KNOX</display-name>
+    <description>Enable the Ranger Knox plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-knox-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>admin</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>admin-password</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger repository config password</display-name>
+    <description>Used for repository creation on Ranger Admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>KNOX_HOME</name>
+    <value>/usr/hdp/current/knox-server</value>
+    <display-name>Knox Home</display-name>
+    <description>Knox home folder</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Ranger default admin username; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Ranger default admin password; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Ranger default Ambari admin username; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Ranger default Ambari admin password; set this when communicating with an external Ranger instance</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml
new file mode 100644
index 0000000..bb0878f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml
new file mode 100644
index 0000000..37bda4c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.knox.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Knox instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminJersey2RESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often (in milliseconds) to poll Ranger Admin for policy changes</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml
new file mode 100644
index 0000000..594ab18
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml
@@ -0,0 +1,174 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+  <property>
+    <name>content</name>
+    <display-name>topology template</display-name>
+    <value>
+        &lt;topology&gt;
+
+            &lt;gateway&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;authentication&lt;/role&gt;
+                    &lt;name&gt;ShiroProvider&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;sessionTimeout&lt;/name&gt;
+                        &lt;value&gt;30&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm&lt;/name&gt;
+                        &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+                        &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+                        &lt;value&gt;ldap://{{knox_host_name}}:33389&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+                        &lt;value&gt;simple&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;urls./**&lt;/name&gt;
+                        &lt;value&gt;authcBasic&lt;/value&gt;
+                    &lt;/param&gt;
+                &lt;/provider&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;identity-assertion&lt;/role&gt;
+                    &lt;name&gt;Default&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;/provider&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;authorization&lt;/role&gt;
+                    &lt;name&gt;AclsAuthz&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;/provider&gt;
+
+            &lt;/gateway&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;NAMENODE&lt;/role&gt;
+                &lt;url&gt;hdfs://{{namenode_host}}:{{namenode_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;JOBTRACKER&lt;/role&gt;
+                &lt;url&gt;rpc://{{rm_host}}:{{jt_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHDFS&lt;/role&gt;
+                {{webhdfs_service_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHCAT&lt;/role&gt;
+                &lt;url&gt;http://{{webhcat_server_host}}:{{templeton_port}}/templeton&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;OOZIE&lt;/role&gt;
+                &lt;url&gt;http://{{oozie_server_host}}:{{oozie_server_port}}/oozie&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHBASE&lt;/role&gt;
+                &lt;url&gt;http://{{hbase_master_host}}:{{hbase_master_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;HIVE&lt;/role&gt;
+                &lt;url&gt;http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;RESOURCEMANAGER&lt;/role&gt;
+                &lt;url&gt;http://{{rm_host}}:{{rm_port}}/ws&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-COORDINATOR-UI&lt;/role&gt;
+                {{druid_coordinator_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-COORDINATOR&lt;/role&gt;
+                {{druid_coordinator_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-OVERLORD-UI&lt;/role&gt;
+                {{druid_overlord_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-OVERLORD&lt;/role&gt;
+                {{druid_overlord_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-ROUTER&lt;/role&gt;
+                {{druid_router_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-BROKER&lt;/role&gt;
+                {{druid_broker_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;ZEPPELINUI&lt;/role&gt;
+                {{zeppelin_ui_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;ZEPPELINWS&lt;/role&gt;
+                {{zeppelin_ws_urls}}
+            &lt;/service&gt;
+
+        &lt;/topology&gt;
+    </value>
+    <description>
+        This configuration specifies the Hadoop cluster services that Knox will provide access to.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-knox-plugin-properties</type>
+        <name>ranger-knox-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml
new file mode 100644
index 0000000..eefa8c9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>users-ldif template</display-name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: 1
+
+# Please replace with site specific values
+dn: dc=hadoop,dc=apache,dc=org
+objectclass: organization
+objectclass: dcObject
+o: Hadoop
+dc: hadoop
+
+# Entry for a sample people container
+# Please replace with site specific values
+dn: ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: people
+
+# Entry for a sample end user
+# Please replace with site specific values
+dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Guest
+sn: User
+uid: guest
+userPassword:guest-password
+
+# entry for sample user admin
+dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Admin
+sn: Admin
+uid: admin
+userPassword:admin-password
+
+# entry for sample user sam
+dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: sam
+sn: sam
+uid: sam
+userPassword:sam-password
+
+# entry for sample user tom
+dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: tom
+sn: tom
+uid: tom
+userPassword:tom-password
+
+# create FIRST Level groups branch
+dn: ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: groups
+description: generic groups branch
+
+# create the analyst group under groups
+dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: analyst
+description: analyst group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+
+
+# create the scientist group under groups
+dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: scientist
+description: scientist group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+
+    </value>
+    <description>
+      Content for the users.ldif file for the demo LDAP server that ships with Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json
new file mode 100644
index 0000000..2d8aa0d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json
@@ -0,0 +1,81 @@
+{
+  "services": [
+    {
+      "name": "KNOX",
+      "components": [
+        {
+          "name": "KNOX_GATEWAY",
+          "identities": [
+            {
+              "name": "knox_principal",
+              "principal": {
+                "value": "${knox-env/knox_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}"
+
+              },
+              "keytab": {
+                "file": "${keytab_dir}/knox.service.keytab",
+                "owner": {
+                  "name": "${knox-env/knox_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "knox-env/knox_keytab_path"
+              }
+            },
+            {
+              "name": "/KNOX/KNOX_GATEWAY/knox_principal",
+              "principal": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "ranger-knox-audit": {
+                "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+                "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+                "xasecure.audit.jaas.Client.option.storeKey": "false",
+                "xasecure.audit.jaas.Client.option.serviceName": "solr",
+                "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml
new file mode 100644
index 0000000..8954d0d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <displayName>Knox</displayName>
+      <comment>Provides a single point of authentication and access for Apache Hadoop services in a cluster</comment>
+      <version>0.5.0.3.0</version>
+      <components>
+        <component>
+          <name>KNOX_GATEWAY</name>
+          <displayName>Knox Gateway</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/knox_gateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>knox_gateway</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>knox_cli</logId>
+            </log>
+            <log>
+              <logId>knox_ldap</logId>
+            </log>
+          </logs>
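+          <!-- Custom commands below let Ambari start and stop the demo LDAP server that ships with Knox -->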
+          <customCommands>
+            <customCommand>
+              <name>STARTDEMOLDAP</name>
+              <commandScript>
+                <script>scripts/knox_gateway.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>STOPDEMOLDAP</name>
+              <commandScript>
+                <script>scripts/knox_gateway.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>knox_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>knox-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>gateway-site</config-type>
+        <config-type>gateway-log4j</config-type>
+        <config-type>topology</config-type>
+        <config-type>admin-topology</config-type>
+        <config-type>knoxsso-topology</config-type>
+        <config-type>ranger-knox-plugin-properties</config-type>
+        <config-type>ranger-knox-audit</config-type>
+        <config-type>ranger-knox-policymgr-ssl</config-type>
+        <config-type>ranger-knox-security</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py
new file mode 100644
index 0000000..257abfb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import optparse
+import socket
+
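+# This script opens a plain TCP connection to the Knox Gateway host/port to
+# verify that the gateway process is listening; it exits non-zero on failure
+# so Ambari's service check can report the component as down.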
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options]")
+  parser.add_option("-p", "--port", dest="port", help="Port for Knox process")
+  parser.add_option("-n", "--hostname", dest="hostname", help="Hostname of Knox Gateway component")
+
+  (options, args) = parser.parse_args()
+  timeout_seconds = 5
+  try:
+    s = socket.create_connection((options.hostname, int(options.port)),timeout=timeout_seconds)
+    print "Successfully connected to %s on port %s" % (options.hostname, options.port)
+    s.close()
+  except socket.error, e:
+    print "Connection to %s on port %s failed: %s" % (options.hostname, options.port, e)
+    exit(1)
+
+if __name__ == "__main__":
+  main()
+
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py
new file mode 100644
index 0000000..34b5643
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py
@@ -0,0 +1,192 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_config import get_config
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.shell import as_user
+from resource_management.core.source import InlineTemplate
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
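+# knox() is implemented twice below; @OsFamilyFuncImpl dispatches to the
+# Windows variant on WINSRV_FAMILY hosts and to the default (Linux) variant
+# on all other OS families.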
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def knox():
+  import params
+
+  XmlConfig("gateway-site.xml",
+            conf_dir=params.knox_conf_dir,
+            configurations=params.config['configurations']['gateway-site'],
+            configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+            owner=params.knox_user
+  )
+
+  # Manually overriding service logon user & password set by the installation package
+  ServiceConfig(params.knox_gateway_win_service_name,
+                action="change_user",
+                username = params.knox_user,
+                password = Script.get_password(params.knox_user))
+
+  File(os.path.join(params.knox_conf_dir, "gateway-log4j.properties"),
+       owner=params.knox_user,
+       content=params.gateway_log4j
+  )
+
+  File(os.path.join(params.knox_conf_dir, "topologies", "default.xml"),
+       group=params.knox_group,
+       owner=params.knox_user,
+       content=InlineTemplate(params.topology_template)
+  )
+
+  if params.admin_topology_template:
+    File(os.path.join(params.knox_conf_dir, "topologies", "admin.xml"),
+       group=params.knox_group,
+       owner=params.knox_user,
+       content=InlineTemplate(params.admin_topology_template)
+    )
+
+  if params.version_formatted and check_stack_feature(StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
+    knoxsso_topology_template_content = get_config("knoxsso-topology")
+    if knoxsso_topology_template_content:
+      File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
+        group=params.knox_group,
+        owner=params.knox_user,
+        content=InlineTemplate(params.knoxsso_topology_template)
+      )
+
+  if params.security_enabled:
+    TemplateConfig( os.path.join(params.knox_conf_dir, "krb5JAASLogin.conf"),
+        owner = params.knox_user,
+        template_tag = None
+    )
+
+  if not os.path.isfile(params.knox_master_secret_path):
+    cmd = format('cmd /C {knox_client_bin} create-master --master {knox_master_secret!p}')
+    Execute(cmd)
+    cmd = format('cmd /C {knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+    Execute(cmd)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def knox():
+    import params
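+    # Ensure all Knox runtime directories exist with knox ownership;
+    # cd_access="a" makes every path component traversable by all users.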
+    Directory([params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")],
+              owner = params.knox_user,
+              group = params.knox_group,
+              create_parents = True,
+              cd_access = "a",
+              mode = 0755,
+              recursive_ownership = True,
+    )
+
+    XmlConfig("gateway-site.xml",
+              conf_dir=params.knox_conf_dir,
+              configurations=params.config['configurations']['gateway-site'],
+              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+              owner=params.knox_user,
+              group=params.knox_group,
+    )
+
+    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
+         mode=0644,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.gateway_log4j)
+    )
+
+    File(format("{params.knox_conf_dir}/topologies/default.xml"),
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.topology_template)
+    )
+
+    if params.admin_topology_template:
+      File(format("{params.knox_conf_dir}/topologies/admin.xml"),
+           group=params.knox_group,
+           owner=params.knox_user,
+           content=InlineTemplate(params.admin_topology_template)
+      )
+
+    if params.version_formatted and check_stack_feature(StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
+      knoxsso_topology_template_content = get_config("knoxsso-topology")
+      if knoxsso_topology_template_content:
+        File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
+            group=params.knox_group,
+            owner=params.knox_user,
+            content=InlineTemplate(params.knoxsso_topology_template)
+        )
+
+    if params.security_enabled:
+      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
+                      owner = params.knox_user,
+                      template_tag = None
+      )
+
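+    # Create the master secret and the gateway certificate only on first start:
+    # each Execute below is guarded by a not_if test for the corresponding file.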
+    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
+    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=master_secret_exist,
+    )
+
+    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=cert_store_exist,
+    )
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def update_knox_folder_permissions():
+  import params
+  Directory(params.knox_logs_dir,
+            owner = params.knox_user,
+            group = params.knox_group
+            )
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def update_knox_logfolder_permissions():
+  """
+   Fix for a bug in the rpm/deb packages: during package installation they re-apply permissions to the
+   folders below. This behaviour affects installations with a non-standard user name/group and can
+   leave the cluster in a non-working state.
+  """
+  import params
+  
+  Directory(params.knox_logs_dir,
+            owner = params.knox_user,
+            group = params.knox_group,
+            create_parents = True,
+            cd_access = "a",
+            mode = 0755,
+            recursive_ownership = True,
+  )
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py
new file mode 100644
index 0000000..8996d23
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py
@@ -0,0 +1,220 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.resources.system import File, Execute, Link
+from resource_management.core.resources.service import Service
+from resource_management.core.logger import Logger
+
+
+from ambari_commons import OSConst, OSCheck
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+import upgrade
+from knox import knox, update_knox_logfolder_permissions
+from knox_ldap import ldap
+from setup_ranger_knox import setup_ranger_knox
+
+
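+# Base class holding the OS-independent logic; Ambari selects the Windows or
+# default (Linux) subclass below at runtime via the @OsFamilyImpl decorator.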
+class KnoxGateway(Script):
+  def get_component_name(self):
+    return "knox-server"
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+    File(os.path.join(params.knox_conf_dir, 'topologies', 'sandbox.xml'),
+         action = "delete",
+    )
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    knox()
+    ldap()
+
+  def configureldap(self, env):
+    import params
+    env.set_params(params)
+    ldap()
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class KnoxGatewayWindows(KnoxGateway):
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    # setup_ranger_knox(env)
+    Service(params.knox_gateway_win_service_name, action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    Service(params.knox_gateway_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.knox_gateway_win_service_name)
+
+  def startdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    Service(params.knox_ldap_win_service_name, action="start")
+
+  def stopdemoldap(self, env):
+    import params
+    env.set_params(params)
+    Service(params.knox_ldap_win_service_name, action="stop")
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class KnoxGatewayDefault(KnoxGateway):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # Back up the data directory to /tmp/knox-upgrade-backup/knox-data-backup.tar just in case
+    # something goes wrong; Knox re-generates missing files such as keystores, which can cause
+    # side effects if the upgrade fails.
+    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
+      absolute_backup_dir = upgrade.backup_data()
+      Logger.info("Knox data was successfully backed up to {0}".format(absolute_backup_dir))
+
+    # <conf-selector-tool> will change the symlink to the conf folder.
+    conf_select.select(params.stack_name, "knox", params.version)
+    stack_select.select("knox-server", params.version)
+
+    # seed the new Knox data directory with the keystores of yesteryear
+    if params.upgrade_direction == Direction.UPGRADE:
+      upgrade.seed_current_data_directory()
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{knox_bin} start')
+    no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
+    setup_ranger_knox(upgrade_type=upgrade_type)
+    # Update the Knox-managed PID symlink so it points at the configured PID directory when a custom location is used
+    if os.path.islink(params.knox_managed_pid_symlink):
+      Link(params.knox_managed_pid_symlink,
+           to = params.knox_pid_dir,
+      )
+
+    update_knox_logfolder_permissions()
+
+    try:
+      Execute(daemon_cmd,
+              user=params.knox_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+      )
+    except:
+      show_logs(params.knox_logs_dir, params.knox_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    daemon_cmd = format('{knox_bin} stop')
+
+    update_knox_logfolder_permissions()
+
+    try:
+      Execute(daemon_cmd,
+              environment={'JAVA_HOME': params.java_home},
+              user=params.knox_user,
+      )
+    except:
+      show_logs(params.knox_logs_dir, params.knox_user)
+      raise
+    
+    File(params.knox_pid_file,
+         action="delete",
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.knox_pid_file)
+
+  def startdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} start')
+    no_op_test = format('ls {ldap_pid_file} >/dev/null 2>&1 && ps -p `cat {ldap_pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=no_op_test
+    )
+
+  def stopdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} stop')
+    Execute(daemon_cmd,
+            environment={'JAVA_HOME': params.java_home},
+            user=params.knox_user,
+            )
+    File(params.ldap_pid_file,
+      action = "delete"
+    )
+      
+  def get_log_folder(self):
+    import params
+    return params.knox_logs_dir
+  
+  def get_user(self):
+    import params
+    return params.knox_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.knox_pid_file]
+
+
+if __name__ == "__main__":
+  KnoxGateway().execute()
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py
new file mode 100644
index 0000000..b6f1b89
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File
+from ambari_commons import OSConst
+from resource_management.core.source import InlineTemplate
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
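+# Writes the configuration files consumed by the demo LDAP server bundled
+# with Knox: ldap-log4j.properties and users.ldif.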
+def _ldap_common():
+    import params
+
+    File(os.path.join(params.knox_conf_dir, 'ldap-log4j.properties'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.ldap_log4j)
+    )
+
+    File(os.path.join(params.knox_conf_dir, 'users.ldif'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.users_ldif
+    )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def ldap():
+  import params
+
+  # Manually overriding service logon user & password set by the installation package
+  ServiceConfig(params.knox_ldap_win_service_name,
+                action="change_user",
+                username = params.knox_user,
+                password = Script.get_password(params.knox_user))
+
+  _ldap_common()
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ldap():
+  _ldap_common()
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..ad1a1dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
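+# Whether the agent may retry this command on failure, driven by the
+# cluster-level command_retry_enabled setting.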
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..4558069
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,457 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import status_params
+
+from resource_management.core.logger import Logger
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and provides the same function set.
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from status_params import *
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select, conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+tmp_dir = Script.get_tmp_dir()
+stack_name = status_params.stack_name
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+version = default("/commandParams/version", None)
+# E.g., 2.3.2.0
+version_formatted = format_stack_version(version)
+
+# E.g., 2.3
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
+
+# This is the version whose state is CURRENT. During an RU, this is the source version.
+# DO NOT format it since we need the build number too.
+upgrade_from_version = default("/hostLevelParams/current_version", None)
+
+# server configurations
+# Default value used in HDP 2.3.0.0 and earlier.
+knox_data_dir = '/var/lib/knox/data'
+
+# Important: the versioned data directory is only used for stack versions strictly greater than 2.3.0.0.
+Logger.info(format("Stack version to use is {version_formatted}"))
+if version_formatted and check_stack_feature(StackFeature.KNOX_VERSIONED_DATA_DIR, version_formatted):
+  # This is the current version. In the case of a Rolling Upgrade, it will be the newer version.
+  # In the case of a Downgrade, it will be the version downgrading to.
+  # This is always going to be a symlink to /var/lib/knox/data_${version}
+  knox_data_dir = format('{stack_root}/{version}/knox/data')
+  Logger.info(format("Detected stack with version {version}, will use knox_data_dir = {knox_data_dir}"))
+
+
+knox_master_secret_path = format('{knox_data_dir}/security/master')
+knox_cert_store_path = format('{knox_data_dir}/security/keystores/gateway.jks')
+knox_user = default("/configurations/knox-env/knox_user", "knox")
+
+knox_logs_dir = '/var/log/knox'
+
+# default parameters
+knox_bin = '/usr/bin/gateway'
+knox_conf_dir = '/etc/knox/conf'
+ldap_bin = '/usr/lib/knox/bin/ldap.sh'
+knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
+
+# HDP 2.2+ parameters
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  knox_bin = format('{stack_root}/current/knox-server/bin/gateway.sh')
+  knox_conf_dir = format('{stack_root}/current/knox-server/conf')
+  ldap_bin = format('{stack_root}/current/knox-server/bin/ldap.sh')
+  knox_client_bin = format('{stack_root}/current/knox-server/bin/knoxcli.sh')
+  knox_master_secret_path = format('{stack_root}/current/knox-server/data/security/master')
+  knox_cert_store_path = format('{stack_root}/current/knox-server/data/security/keystores/gateway.jks')
+  knox_data_dir = format('{stack_root}/current/knox-server/data/')
+
+knox_group = default("/configurations/knox-env/knox_group", "knox")
+mode = 0644
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_rpc = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+    # With HA enabled namenode_address is recomputed
+  namenode_address = format('hdfs://{dfs_ha_nameservices}')
+
+namenode_port_map = {}
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.http-address.{dfs_ha_nameservices}.{nn_id}')]
+    nn_host_parts = nn_host.split(':')
+    namenode_port_map[nn_host_parts[0]] = nn_host_parts[1]
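+# For illustration (hypothetical hosts): with dfs.namenode.http-address values
+# nn1.example.com:50070 and nn2.example.com:50070, namenode_port_map becomes
+# {'nn1.example.com': '50070', 'nn2.example.com': '50070'}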
+
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", None)
+if type(namenode_hosts) is list:
+  namenode_host = namenode_hosts[0]
+else:
+  namenode_host = namenode_hosts
+
+has_namenode = namenode_host is not None
+namenode_http_port = "50070"
+namenode_rpc_port = "8020"
+
+if has_namenode:
+  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
+    namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+  if dfs_ha_enabled and namenode_rpc:
+    namenode_rpc_port = get_port_from_url(namenode_rpc)
+  else:
+    if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+      namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])
+
+webhdfs_service_urls = ""
+
+def buildUrlElement(protocol, hdfs_host, port, servicePath):
+  openTag = "<url>"
+  closeTag = "</url>"
+  proto = protocol + "://"
+  newLine = "\n"
+  if hdfs_host is None or port is None:
+    return ""
+  else:
+    return openTag + proto + hdfs_host + ":" + port + servicePath + closeTag + newLine
+
+namenode_host_keys = namenode_port_map.keys()
+if len(namenode_host_keys) > 0:
+  for host in namenode_host_keys:
+    webhdfs_service_urls += buildUrlElement("http", host, namenode_port_map[host], "/webhdfs")
+else:
+  webhdfs_service_urls = buildUrlElement("http", namenode_host, namenode_http_port, "/webhdfs")
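+# For illustration, each generated topology element looks like
+# (hypothetical host): <url>http://nn1.example.com:50070/webhdfs</url>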
+
+
+rm_hosts = default("/clusterHostInfo/rm_host", None)
+if type(rm_hosts) is list:
+  rm_host = rm_hosts[0]
+else:
+  rm_host = rm_hosts
+has_rm = rm_host is not None
+
+jt_rpc_port = "8050"
+rm_port = "8080"
+
+if has_rm:
+  if 'yarn.resourcemanager.address' in config['configurations']['yarn-site']:
+    jt_rpc_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.address'])
+
+  if 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
+    rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+
+hive_http_port = default('/configurations/hive-site/hive.server2.thrift.http.port', "10001")
+hive_http_path = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_hosts = default("/clusterHostInfo/hive_server_host", None)
+if type(hive_server_hosts) is list:
+  hive_server_host = hive_server_hosts[0]
+else:
+  hive_server_host = hive_server_hosts
+
+templeton_port = default('/configurations/webhcat-site/templeton.port', "50111")
+webhcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", None)
+if type(webhcat_server_hosts) is list:
+  webhcat_server_host = webhcat_server_hosts[0]
+else:
+  webhcat_server_host = webhcat_server_hosts
+
+hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
+if type(hbase_master_hosts) is list:
+  hbase_master_host = hbase_master_hosts[0]
+else:
+  hbase_master_host = hbase_master_hosts
+
+oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
+if type(oozie_server_hosts) is list:
+  oozie_server_host = oozie_server_hosts[0]
+else:
+  oozie_server_host = oozie_server_hosts
+
+has_oozie = oozie_server_host is not None
+oozie_server_port = "11000"
+
+if has_oozie:
+  oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+
+# Knox managed properties
+knox_managed_pid_symlink = format('{stack_root}/current/knox-server/pids')
+
+#knox log4j
+knox_gateway_log_maxfilesize = default('/configurations/gateway-log4j/knox_gateway_log_maxfilesize',256)
+knox_gateway_log_maxbackupindex = default('/configurations/gateway-log4j/knox_gateway_log_maxbackupindex',20)
+knox_ldap_log_maxfilesize = default('/configurations/ldap-log4j/knox_ldap_log_maxfilesize',256)
+knox_ldap_log_maxbackupindex = default('/configurations/ldap-log4j/knox_ldap_log_maxbackupindex',20)
+
+# server configurations
+knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
+knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
+knox_host_name_in_cluster = config['hostname']
+knox_host_port = config['configurations']['gateway-site']['gateway.port']
+topology_template = config['configurations']['topology']['content']
+admin_topology_template = default('/configurations/admin-topology/content', None)
+knoxsso_topology_template = config['configurations']['knoxsso-topology']['content']
+gateway_log4j = config['configurations']['gateway-log4j']['content']
+ldap_log4j = config['configurations']['ldap-log4j']['content']
+users_ldif = config['configurations']['users-ldif']['content']
+java_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+  _hostname_lowercase = config['hostname'].lower()
+  knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
+
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger knox plugin start section
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# Ranger XML configuration support is determined via a stack feature rather than xml_configurations_supported in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger knox plugin enabled property
+enable_ranger_knox = default("/configurations/ranger-knox-plugin-properties/ranger-knox-plugin-enabled", "No")
+enable_ranger_knox = enable_ranger_knox.lower() == 'yes'
+
+# get ranger knox properties if enable_ranger_knox is True
+if enable_ranger_knox:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-knox-security']['ranger.plugin.knox.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger knox service/repository name
+  repo_name = str(config['clusterName']) + '_knox'
+  repo_name_value = config['configurations']['ranger-knox-security']['ranger.plugin.knox.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  knox_home = config['configurations']['ranger-knox-plugin-properties']['KNOX_HOME']
+  common_name_for_certificate = config['configurations']['ranger-knox-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_knox:
+    external_admin_username = default('/configurations/ranger-knox-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-knox-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-knox-plugin-properties']
+  policy_user = config['configurations']['ranger-knox-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{stack_root}/current/knox-server/ext/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{stack_root}/current/knox-server/ext/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
+
+  knox_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'knox.url': format("https://{knox_host_name}:{knox_host_port}/gateway/admin/api/v1/topologies"),
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    knox_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    knox_ranger_plugin_config['policy.download.auth.users'] = knox_user
+    knox_ranger_plugin_config['tag.download.auth.users'] = knox_user
+
+  # serialize the plugin config only after custom and Kerberos-specific properties
+  # have been merged in, so they are part of the repo payload
+  knox_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(knox_ranger_plugin_config),
+    'description': 'knox repo',
+    'name': repo_name,
+    'repositoryType': 'knox',
+    'assetType': '5',
+  }
+
+  if stack_supports_ranger_kerberos:
+    knox_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    knox_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': knox_ranger_plugin_config,
+      'description': 'knox repo',
+      'name': repo_name,
+      'type': 'knox'
+    }
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# need this to capture cluster name from where ranger knox plugin is enabled
+cluster_name = config['clusterName']
+
+# ranger knox plugin end section
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
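+# With the partial in place, callers only need to pass the per-resource
+# arguments; for illustration (hypothetical path):
+#   HdfsResource("/tmp/example", type="directory", action="create_on_execute")
+#   HdfsResource(None, action="execute")  # flushes the batched operations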
+
+druid_coordinator_urls = ""
+if "druid-coordinator" in config['configurations']:
+  port = config['configurations']['druid-coordinator']['druid.port']
+  for host in config['clusterHostInfo']['druid_coordinator_hosts']:
+    druid_coordinator_urls += buildUrlElement("http", host, port, "")
+
+druid_overlord_urls = ""
+if "druid-overlord" in config['configurations']:
+  port = config['configurations']['druid-overlord']['druid.port']
+  for host in config['clusterHostInfo']['druid_overlord_hosts']:
+    druid_overlord_urls += buildUrlElement("http", host, port, "")
+
+druid_broker_urls = ""
+if "druid-broker" in config['configurations']:
+  port = config['configurations']['druid-broker']['druid.port']
+  for host in config['clusterHostInfo']['druid_broker_hosts']:
+    druid_broker_urls += buildUrlElement("http", host, port, "")
+
+druid_router_urls = ""
+if "druid-router" in config['configurations']:
+  port = config['configurations']['druid-router']['druid.port']
+  for host in config['clusterHostInfo']['druid_router_hosts']:
+    druid_router_urls += buildUrlElement("http", host, port, "")
+
+zeppelin_ui_urls = ""
+zeppelin_ws_urls = ""
+websocket_support = "false"
+if "zeppelin-config" in config['configurations']:
+  port = config['configurations']['zeppelin-config']['zeppelin.server.port']
+  protocol = "https" if config['configurations']['zeppelin-config']['zeppelin.ssl'] else "http"
+  host = config['clusterHostInfo']['zeppelin_master_hosts'][0]
+  zeppelin_ui_urls += buildUrlElement(protocol, host, port, "")
+  zeppelin_ws_urls += buildUrlElement("ws", host, port, "/ws")
+  websocket_support = "true"
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..631146d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+import os
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+stack_root = None
+knox_home = None
+knox_conf_dir = None
+knox_logs_dir = None
+knox_bin = None
+ldap_bin = None
+knox_client_bin = None
+knox_data_dir = None
+
+knox_master_secret_path = None
+knox_cert_store_path = None
+
+try:
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  knox_home = os.environ['KNOX_HOME']
+  knox_conf_dir = os.environ['KNOX_CONF_DIR']
+  knox_logs_dir = os.environ['KNOX_LOG_DIR']
+  knox_bin = os.path.join(knox_home, 'bin', 'gateway.exe')
+  ldap_bin = os.path.join(knox_home, 'bin', 'ldap.exe')
+  knox_client_bin = os.path.join(knox_home, 'bin', 'knoxcli.cmd')
+  knox_data_dir = os.path.join(knox_home, 'data')
+
+  knox_master_secret_path = os.path.join(knox_data_dir, 'security', 'master')
+  knox_cert_store_path = os.path.join(knox_data_dir, 'security', 'keystores', 'gateway.jks')
+except KeyError:
+  # expected when the Knox/Hadoop environment variables are not set
+  pass
+
+knox_host_port = config['configurations']['gateway-site']['gateway.port']
+knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
+knox_host_name_in_cluster = config['hostname']
+knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
+topology_template = config['configurations']['topology']['content']
+admin_topology_template = default('/configurations/admin-topology/content', None)
+knoxsso_topology_template = config['configurations']['knoxsso-topology']['content']
+gateway_log4j = config['configurations']['gateway-log4j']['content']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+ldap_log4j = config['configurations']['ldap-log4j']['content']
+users_ldif = config['configurations']['users-ldif']['content']
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+knox_user = hadoop_user
+hdfs_user = hadoop_user
+knox_group = None
+mode = None
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..a2134d6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.libraries.functions.format import format
+from resource_management.core.source import StaticFile
+import sys
+import os
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+
+class KnoxServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class KnoxServiceCheckWindows(KnoxServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    temp_dir = os.path.join(os.path.dirname(params.knox_home), "temp")
+    validateKnoxFileName = "validateKnoxStatus.py"
+    validateKnoxFilePath = os.path.join(temp_dir, validateKnoxFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "%s %s -p %s -n %s" % (python_executable, validateKnoxFilePath, params.knox_host_port, params.knox_host_name)
+
+    print "Test connectivity to knox server"
+
+    File(validateKnoxFilePath,
+         content=StaticFile(validateKnoxFileName)
+    )
+
+    Execute(validateStatusCmd,
+            tries=3,
+            try_sleep=5,
+            timeout=5,
+            logoutput=True
+    )
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class KnoxServiceCheckDefault(KnoxServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    validateKnoxFileName = "validateKnoxStatus.py"
+    validateKnoxFilePath = format("{tmp_dir}/{validateKnoxFileName}")
+    python_executable = sys.executable
+    validateStatusCmd = format("{python_executable} {validateKnoxFilePath} -p {knox_host_port} -n {knox_host_name}")
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
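+    # For illustration (hypothetical principal, host and paths), the secured
+    # smoke_cmd expands to something like:
+    #   /usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /usr/bin/python /var/lib/ambari-agent/tmp/validateKnoxStatus.py -p 8443 -n knox1.example.com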
+
+    print "Test connectivity to knox server"
+
+    File(validateKnoxFilePath,
+         content=StaticFile(validateKnoxFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            timeout=5,
+            logoutput=True
+    )
+
+
+if __name__ == "__main__":
+  KnoxServiceCheck().execute()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py
new file mode 100644
index 0000000..c486ef7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_core_site_for_required_plugins
+from resource_management.core.resources import File
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+
+def setup_ranger_knox(upgrade_type=None):
+  import params
+
+  if params.enable_ranger_knox:
+
+    stack_version = None
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Knox: Setup ranger: command retry is enabled, will retry if Ranger admin is down!")
+    else:
+      Logger.info("Knox: Setup ranger: command retry is not enabled, will skip if Ranger admin is down!")
+
+    if params.xml_configurations_supported and params.enable_ranger_knox and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hdfs_user,
+                           group=params.hdfs_user,
+                           mode=0755,
+                           recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/knox",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.knox_user,
+                           group=params.knox_user,
+                           mode=0700,
+                           recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+        if params.namenode_hosts is not None and len(params.namenode_hosts) > 1:
+          Logger.info('Ranger Knox plugin is enabled in NameNode HA environment along with audit to Hdfs enabled, creating hdfs-site.xml')
+          XmlConfig("hdfs-site.xml",
+            conf_dir=params.knox_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.knox_user,
+            group=params.knox_group,
+            mode=0644
+          )
+        else:
+          File(format('{knox_conf_dir}/hdfs-site.xml'), action="delete")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('knox-server', 'knox', params.previous_jdbc_jar,
+                          params.downloaded_custom_connector, params.driver_curl_source,
+                          params.driver_curl_target, params.java_home,
+                          params.repo_name, params.knox_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_knox, conf_dict=params.knox_conf_dir,
+                          component_user=params.knox_user, component_group=params.knox_group, cache_service_list=['knox'],
+                          plugin_audit_properties=params.config['configurations']['ranger-knox-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-knox-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-knox-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-knox-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-knox-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-knox-policymgr-ssl'],
+                          component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.knox_principal_name if params.security_enabled else None,
+                          component_user_keytab=params.knox_keytab_path if params.security_enabled else None)
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('knox-server', 'knox', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java_home,
+                        params.repo_name, params.knox_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_knox, conf_dict=params.knox_conf_dir,
+                        component_user=params.knox_user, component_group=params.knox_group, cache_service_list=['knox'],
+                        plugin_audit_properties=params.config['configurations']['ranger-knox-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-knox-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-knox-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-knox-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-knox-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-knox-policymgr-ssl'],
+                        component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_knox and params.has_namenode and params.security_enabled:
+      Logger.info("Stack supports core-site.xml creation for Ranger plugin, creating core-site.xml from namenode configuraitions")
+      setup_core_site_for_required_plugins(component_user=params.knox_user, component_group=params.knox_group,create_core_site_path = params.knox_conf_dir, config = params.config)
+    else:
+      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
+
+  else:
+    Logger.info('Ranger Knox plugin is not enabled')
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..3cbd920
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  knox_gateway_win_service_name = "gateway"
+  knox_ldap_win_service_name = "ldap"
+else:
+  knox_conf_dir = '/etc/knox/conf'
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    knox_conf_dir = format('{stack_root}/current/knox-server/conf')
+  knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
+  knox_pid_file = format("{knox_pid_dir}/gateway.pid")
+  ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
+
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  if security_enabled:
+      knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+      knox_principal_name = config['configurations']['knox-env']['knox_principal_name']
+  else:
+      knox_keytab_path = None
+      knox_principal_name = None
+
+  hostname = config['hostname'].lower()
+  knox_user = default("/configurations/knox-env/knox_user", "knox")
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  temp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..917f340
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import tempfile
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import tar_archive
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.script.script import Script
+
+
+BACKUP_TEMP_DIR = "knox-upgrade-backup"
+BACKUP_DATA_ARCHIVE = "knox-data-backup.tar"
+STACK_ROOT_DEFAULT = Script.get_stack_root()
+
+def backup_data():
+  """
+  Backs up the knox data as part of the upgrade process.
+  :return: Returns the path to the absolute backup directory.
+  """
+  Logger.info('Backing up Knox data directory before upgrade...')
+  directoryMappings = _get_directory_mappings_during_upgrade()
+
+  Logger.info("Directory mappings to backup: {0}".format(str(directoryMappings)))
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+  return absolute_backup_dir
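+# Illustrative use from an upgrade hook: the returned directory holds the
+# tarballs, e.g. /tmp/knox-upgrade-backup on a typical Linux host:
+#   backup_dir = backup_data()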
+
+
+def seed_current_data_directory():
+  """
+  HDP stack example:
+
+  Knox uses "versioned" data directories in some stacks:
+  /usr/hdp/2.2.0.0-1234/knox/data -> /var/lib/knox/data
+  /usr/hdp/2.3.0.0-4567/knox/data -> /var/lib/knox/data-2.3.0.0-4567
+
+  If the stack being upgraded to supports versioned data directories for Knox, then we should
+  seed the data from the prior version. This is mainly because Knox keeps things like keystores
+  in the data directory and if those aren't copied over then it will re-create self-signed
+  versions. This side-effect behavior causes loss of service in clusters where Knox is using
+  custom keystores.
+
+  cp -R -p -f /usr/hdp/<old>/knox-server/data/. /usr/hdp/current/knox-server/data
+  :return:
+  """
+  import params
+
+  if params.version is None or params.upgrade_from_version is None:
+    raise Fail("The source and target versions are required")
+
+  if check_stack_feature(StackFeature.KNOX_VERSIONED_DATA_DIR, params.version):
+    Logger.info("Seeding Knox data from prior version...")
+
+    # <stack-root>/2.3.0.0-1234/knox/data/.
+    source_data_dir = os.path.join(params.stack_root, params.upgrade_from_version, "knox", "data", ".")
+
+    # <stack-root>/current/knox-server/data
+    target_data_dir = os.path.join(params.stack_root, "current", "knox-server", "data")
+
+    # recursive copy, overwriting, and preserving attributes
+    Execute(("cp", "-R", "-p", "-f", source_data_dir, target_data_dir), sudo = True)
+
+
+def _get_directory_mappings_during_upgrade():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  # the data directory is always a symlink to the "correct" data directory in /var/lib/knox
+  # such as /var/lib/knox/data or /var/lib/knox/data-2.4.0.0-1234
+  knox_data_dir = STACK_ROOT_DEFAULT + '/current/knox-server/data'
+
+  directories = { knox_data_dir: BACKUP_DATA_ARCHIVE }
+
+  Logger.info(format("Knox directories to backup:\n{directories}"))
+  return directories
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2 b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2
new file mode 100644
index 0000000..6d7cf72
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2
@@ -0,0 +1,60 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"knox_gateway",
+      "rowtype":"service",
+      "path":"/var/log/knox/gateway.log"
+    },
+    {
+      "type":"knox_cli",
+      "rowtype":"service",
+      "path":"/var/log/knox/knoxcli.log"
+    },
+    {
+      "type":"knox_ldap",
+      "rowtype":"service",
+      "path":"/var/log/knox/ldap.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "knox_gateway",
+            "knox_cli",
+            "knox_ldap"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2 b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2
new file mode 100644
index 0000000..fa3237b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2
@@ -0,0 +1,30 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+com.sun.security.jgss.initiate {
+com.sun.security.auth.module.Krb5LoginModule required
+renewTGT=true
+doNotPrompt=true
+useKeyTab=true
+keyTab="{{knox_keytab_path}}"
+principal="{{knox_principal_name}}"
+isInitiator=true
+storeKey=true
+useTicketCache=true
+client=true;
+};
+
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json
new file mode 100644
index 0000000..c0475e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for KNOX",
+    "KNOX_GATEWAY-START" : ["RANGER_USERSYNC-START", "NAMENODE-START"],
+    "KNOX_SERVICE_CHECK-SERVICE_CHECK" : ["KNOX_GATEWAY-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py
new file mode 100644
index 0000000..575f910
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+import xml.etree.ElementTree as ET
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class KnoxServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    host index where component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = KnoxRecommender()
+    recommender.recommendKnoxConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = KnoxValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class KnoxRecommender(service_advisor.ServiceAdvisor):
+  """
+  Knox Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendKnoxConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    if "ranger-env" in services["configurations"] and "ranger-knox-plugin-properties" in services["configurations"] and \
+        "ranger-knox-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putKnoxRangerPluginProperty = self.putProperty(configurations, "ranger-knox-plugin-properties", services)
+      rangerEnvKnoxPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-knox-plugin-enabled"]
+      putKnoxRangerPluginProperty("ranger-knox-plugin-enabled", rangerEnvKnoxPluginProperty)
+
+    if 'topology' in services["configurations"] and 'content' in services["configurations"]["topology"]["properties"]:
+      putKnoxTopologyContent = self.putProperty(configurations, "topology", services)
+      rangerPluginEnabled = ''
+      if 'ranger-knox-plugin-properties' in configurations and 'ranger-knox-plugin-enabled' in configurations['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+      elif 'ranger-knox-plugin-properties' in services['configurations'] and 'ranger-knox-plugin-enabled' in services['configurations']['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+
+      # check if authorization provider already added
+      topologyContent = services["configurations"]["topology"]["properties"]["content"]
+      authorizationProviderExists = False
+      authNameChanged = False
+      root = ET.fromstring(topologyContent)
+      if root is not None:
+        gateway = root.find("gateway")
+        if gateway is not None:
+          for provider in gateway.findall('provider'):
+            role = provider.find('role')
+            if role is not None and role.text and role.text.lower() == "authorization":
+              authorizationProviderExists = True
+
+            name = provider.find('name')
+            if name is not None and name.text == "AclsAuthz" and rangerPluginEnabled \
+               and rangerPluginEnabled.lower() == "yes":
+              newAuthName = "XASecurePDPKnox"
+              authNameChanged = True
+            elif name is not None and (((not rangerPluginEnabled) or rangerPluginEnabled.lower() != "yes") \
+               and name.text == 'XASecurePDPKnox'):
+              newAuthName = "AclsAuthz"
+              authNameChanged = True
+
+            if authNameChanged:
+              name.text = newAuthName
+              putKnoxTopologyContent('content', ET.tostring(root))
+
+            if authorizationProviderExists:
+              break
+
+      if not authorizationProviderExists:
+        if root is not None:
+          gateway = root.find("gateway")
+          if gateway is not None:
+            provider = ET.SubElement(gateway, 'provider')
+
+            role = ET.SubElement(provider, 'role')
+            role.text = "authorization"
+
+            name = ET.SubElement(provider, 'name')
+            if rangerPluginEnabled and rangerPluginEnabled.lower() == "yes":
+              name.text = "XASecurePDPKnox"
+            else:
+              name.text = "AclsAuthz"
+
+            enabled = ET.SubElement(provider, 'enabled')
+            enabled.text = "true"
+
+            #TODO add pretty format for newly added provider
+            putKnoxTopologyContent('content', ET.tostring(root))
+
+
+
+
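As a standalone illustration of the ElementTree handling used by the recommender above — injecting an authorization provider into a Knox topology — here is a self-contained sketch with a made-up minimal topology (the real content comes from services["configurations"]["topology"]):

    import xml.etree.ElementTree as ET

    topology = "<topology><gateway/></topology>"  # hypothetical minimal content
    root = ET.fromstring(topology)
    gateway = root.find("gateway")

    # Mirror the recommender: add an authorization provider when none exists.
    provider = ET.SubElement(gateway, "provider")
    ET.SubElement(provider, "role").text = "authorization"
    ET.SubElement(provider, "name").text = "AclsAuthz"  # or XASecurePDPKnox when the Ranger plugin is enabled
    ET.SubElement(provider, "enabled").text = "true"

    print(ET.tostring(root))  # this serialized form is what putKnoxTopologyContent stores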
+class KnoxValidator(service_advisor.ServiceAdvisor):
+  """
+  Knox Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("ranger-knox-plugin-properties", self.validateKnoxRangerPluginConfigurationsFromHDP22),
+                       ]
+
+  def validateKnoxRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-knox-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-knox-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # the ranger-knox-plugin must also be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or 'ranger-knox-plugin-enabled' not in ranger_env or \
+          ranger_env['ranger-knox-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-knox-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-knox-plugin-properties/ranger-knox-plugin-enabled must correspond to ranger-env/ranger-knox-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-knox-plugin-properties")
+
+
+
+
+
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-ambari-config.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-ambari-config.xml
index 19bbd4a..fce4c10 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-ambari-config.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-ambari-config.xml
@@ -31,6 +31,7 @@
       <show-property-name>false</show-property-name>
       <property-file-name>input.config-ambari.json.j2</property-file-name>
       <property-file-type>text</property-file-type>
+      <visible>false</visible>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-custom-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-custom-logsearch-conf.xml
deleted file mode 100644
index 30a93a5..0000000
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-custom-logsearch-conf.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_component_mappings</name>
-    <display-name>Service Component mapping</display-name>
-    <description>Log Search service component logid mapping list (e.g.: SERVICE1=S1_COMPONENT1:logid1,logid2;S1_COMPONENT2:logid3|SERVICE2=...)</description>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Log Feeder Config</display-name>
-    <description>Metadata jinja template for Log Feeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
index 1ff8ad3..a38f961 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
@@ -120,4 +120,14 @@
     <display-name>Input cache key field</display-name>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>logfeeder.include.default.level</name>
+    <value>FATAL,ERROR,WARN</value>
+    <description>Include default Log Feeder Log Levels for Log Search. Used for bootstrapping the configuration only. (levels: FATAL,ERROR,WARN,INFO,DEBUG,TRACE)</description>
+    <display-name>Log Feeder Log Levels</display-name>
+    <value-attributes>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml
new file mode 100644
index 0000000..0087028
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-common-properties.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+</configuration>
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
index a18c5c5..be586fd 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
@@ -130,16 +130,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch.logfeeder.include.default.level</name>
-    <value>FATAL,ERROR,WARN</value>
-    <description>Include default Log Feeder Log Levels for Log Search. Used for bootstrapping the configuration only. (levels: FATAL,ERROR,WARN,INFO,DEBUG,TRACE)</description>
-    <display-name>Log Feeder Log Levels</display-name>
-    <value-attributes>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>logsearch.solr.metrics.collector.hosts</name>
     <value>{metrics_collector_hosts}</value>
     <description>Metrics collector hosts for pushing metrics by Log Search Solr</description>
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/logsearch_config_aggregator.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/logsearch_config_aggregator.py
deleted file mode 100644
index c14900e..0000000
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/logsearch_config_aggregator.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import Logger
-
-def __parse_component_mappings(component_mappings):
-  components = list()
-  component_mappings_list = component_mappings.split(';')
-  if component_mappings_list and len(component_mappings_list) > 0:
-    metadata_list = map(lambda x : x.split(':'), component_mappings_list)
-    if metadata_list and len(metadata_list) > 0:
-      for metadata in metadata_list:
-        if (len(metadata) == 2):
-          logids = metadata[1].split(',')
-          components.extend(logids)
-          Logger.info("Found logids for logsearch component %s - (%s) " % (metadata[0], metadata[1]))
-  return components
-
-def get_logsearch_meta_configs(configurations):
-  logsearch_meta_configs = {}
-  for key, value in configurations.iteritems():  # iter on both keys and values
-    if str(key).endswith('logsearch-conf'):
-      logsearch_meta_configs[key] = value
-      Logger.info("Found logsearch config entry : " + key)
-  return logsearch_meta_configs
-
-def get_logfeeder_metadata(logsearch_meta_configs):
-  """
-  get logfeeder pattern metadata list, an element: (e.g.) :
-  ['service_config_name' : 'pattern json content']
-  """
-  logfeeder_contents = {}
-  for key, value in logsearch_meta_configs.iteritems():
-    if 'content' in logsearch_meta_configs[key] and logsearch_meta_configs[key]['content'].strip():
-      logfeeder_contents[key] = logsearch_meta_configs[key]['content']
-      Logger.info("Found logfeeder pattern content in " + key)
-  return logfeeder_contents
-
-def get_logsearch_metadata(logsearch_meta_configs):
-  """
-  get logsearch metadata list, an element (e.g.) :
-  ['service_name_key' : {component1 : [logid1, logid2]}, {component2 : [logid1, logid2]}]
-  """
-  logsearch_service_component_mappings = {}
-  for key, value in logsearch_meta_configs.iteritems():
-    if 'service_name' in logsearch_meta_configs[key] and 'component_mappings' in logsearch_meta_configs[key]:
-      service_name = logsearch_meta_configs[key]['service_name']
-      component_mappings = __parse_component_mappings(logsearch_meta_configs[key]['component_mappings'])
-      if service_name.strip() and component_mappings:
-        logsearch_service_component_mappings[service_name] = component_mappings
-    if 'service_component_mappings' in logsearch_meta_configs[key]:
-      service_component_mappings = logsearch_meta_configs[key]['service_component_mappings']
-      if service_component_mappings.strip():
-        for service_component_mapping in service_component_mappings.split('|'):
-          tokens = service_component_mapping.split('=')
-          service_name = tokens[0]
-          component_mappings = __parse_component_mappings(tokens[1])
-          if service_name.strip() and component_mappings:
-            logsearch_service_component_mappings[service_name] = component_mappings
-
-  return logsearch_service_component_mappings
-
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index d00be4e..6738c5c 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -21,7 +21,6 @@
 
 import os
 from ambari_commons.constants import AMBARI_SUDO_BINARY
-from logsearch_config_aggregator import get_logfeeder_metadata, get_logsearch_metadata, get_logsearch_meta_configs
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.is_empty import is_empty
@@ -79,9 +78,6 @@
 cluster_name = str(config['clusterName'])
 
 configurations = config['configurations'] # need reference inside logfeeder jinja templates
-logserch_meta_configs = get_logsearch_meta_configs(configurations)
-logsearch_metadata = get_logsearch_metadata(logserch_meta_configs)
-logfeeder_metadata = get_logfeeder_metadata(logserch_meta_configs)
 
 # for now just pick first collector
 if 'metrics_collector_hosts' in config['clusterHostInfo']:
@@ -256,17 +252,11 @@
 
 logsearch_properties['logsearch.protocol'] = logsearch_ui_protocol
 
-logsearch_acls = ''
-if 'infra-solr-env' in config['configurations'] and security_enabled and not logsearch_use_external_solr:
-  acl_infra_solr_principal = get_name_from_principal(config['configurations']['infra-solr-env']['infra_solr_kerberos_principal'])
-  acl_logsearch_principal = get_name_from_principal(config['configurations']['logsearch-env']['logsearch_kerberos_principal'])
-  logsearch_acls = format('world:anyone:r,sasl:{acl_infra_solr_principal}:cdrwa,sasl:{acl_logsearch_principal}:cdrwa')
-  logsearch_properties['logsearch.solr.zk.acls'] = logsearch_acls
-  logsearch_properties['logsearch.solr.audit.logs.zk.acls'] = logsearch_acls
-
 # load config values
 
-logsearch_properties = dict(logsearch_properties.items() + dict(config['configurations']['logsearch-properties']).items())
+logsearch_properties = dict(logsearch_properties.items() +
+                            dict(config['configurations']['logsearch-common-properties']).items() +
+                            dict(config['configurations']['logsearch-properties']).items())
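Note the merge order: with this Python 2 idiom (items() returns lists that can be concatenated), later entries win, so logsearch-properties overrides logsearch-common-properties, which overrides the derived defaults built above. A tiny illustration with made-up values:

    defaults = {'logsearch.protocol': 'http'}
    common = {'logsearch.protocol': 'https'}        # logsearch-common-properties
    site = {'logsearch.collection.numshards': '5'}  # logsearch-properties
    merged = dict(defaults.items() + common.items() + site.items())
    # merged['logsearch.protocol'] == 'https'; the later dict won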
 
 # load derivated values
 
@@ -279,6 +269,9 @@
 logsearch_properties['logsearch.auth.external_auth.host_url'] = format(logsearch_properties['logsearch.auth.external_auth.host_url'])
 logsearch_properties['logsearch.spnego.kerberos.host'] = logsearch_spnego_host
 
+if 'logsearch.config.zk_connect_string' not in logsearch_properties:
+  logsearch_properties['logsearch.config.zk_connect_string'] = logsearch_solr_zk_quorum
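The same guard can be expressed with setdefault — behaviorally equivalent, shown only as an alternative:

    logsearch_properties.setdefault('logsearch.config.zk_connect_string', logsearch_solr_zk_quorum)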
+
 if logsearch_solr_kerberos_enabled:
   logsearch_properties['logsearch.solr.kerberos.enable'] = 'true'
   logsearch_properties['logsearch.solr.jaas.file'] = logsearch_jaas_file
@@ -330,17 +323,7 @@
 logfeeder_ambari_config_content = config['configurations']['logfeeder-ambari-config']['content']
 logfeeder_output_config_content = config['configurations']['logfeeder-output-config']['content']
 
-logfeeder_default_services = ['logsearch']
-logfeeder_default_config_file_names = ['global.config.json'] + ['input.config-%s.json' % (tag) for tag in logfeeder_default_services]
-logfeeder_custom_config_file_names = ['input.config-%s.json' % (tag.replace('-logsearch-conf', ''))
-                                      for tag, content in logfeeder_metadata.iteritems() if any(logfeeder_metadata)]
-
-if logfeeder_system_log_enabled:
-  default_config_files = ','.join(['output.config.json','input.config-ambari.json'] + logfeeder_default_config_file_names + logfeeder_custom_config_file_names
-                                  + ['input.config-system_messages.json', 'input.config-secure_log.json'])
-else:
-  default_config_files = ','.join(['output.config.json','input.config-ambari.json'] + logfeeder_default_config_file_names + logfeeder_custom_config_file_names)
-
+default_config_files = ','.join(['output.config.json', 'global.config.json'])
 
 logfeeder_grok_patterns = config['configurations']['logfeeder-grok']['default_grok_patterns']
 if config['configurations']['logfeeder-grok']['custom_grok_patterns'].strip():
@@ -363,18 +346,25 @@
 
 # load config values
 
-logfeeder_properties = dict(logfeeder_properties.items() + dict(config['configurations']['logfeeder-properties']).items())
+logfeeder_properties = dict(logfeeder_properties.items() +
+                            dict(config['configurations']['logsearch-common-properties']).items() +
+                            dict(config['configurations']['logfeeder-properties']).items())
 
 # load derivated values
 
-logfeeder_properties['logfeeder.metrics.collector.hosts'] = format(logfeeder_properties['logfeeder.metrics.collector.hosts'])
+logfeeder_properties['cluster.name'] = cluster_name
+logfeeder_properties['logfeeder.config.dir'] = logsearch_logfeeder_conf
 logfeeder_properties['logfeeder.config.files'] = format(logfeeder_properties['logfeeder.config.files'])
 logfeeder_properties['logfeeder.solr.zk_connect_string'] = logsearch_solr_zk_quorum + logsearch_solr_zk_znode
 
+logfeeder_properties['logfeeder.metrics.collector.hosts'] = format(logfeeder_properties['logfeeder.metrics.collector.hosts'])
 logfeeder_properties['logfeeder.metrics.collector.protocol'] = metrics_collector_protocol
 logfeeder_properties['logfeeder.metrics.collector.port'] = metrics_collector_port
 logfeeder_properties['logfeeder.metrics.collector.path'] = '/ws/v1/timeline/metrics'
 
+if 'logsearch.config.zk_connect_string' not in logfeeder_properties:
+  logfeeder_properties['logsearch.config.zk_connect_string'] = logsearch_solr_zk_quorum
+
 if logsearch_solr_kerberos_enabled:
   if 'logfeeder.solr.kerberos.enable' not in logfeeder_properties:
     logfeeder_properties['logfeeder.solr.kerberos.enable'] = 'true'
@@ -387,11 +377,24 @@
 
 logfeeder_use_ssl = logsearch_solr_ssl_enabled or metrics_collector_protocol == 'https'
 
+
+logsearch_acls = ''
+if 'infra-solr-env' in config['configurations'] and security_enabled and not logsearch_use_external_solr:
+  acl_infra_solr_principal = get_name_from_principal(config['configurations']['infra-solr-env']['infra_solr_kerberos_principal'])
+  acl_logsearch_principal = get_name_from_principal(config['configurations']['logsearch-env']['logsearch_kerberos_principal'])
+  logsearch_acls = format('world:anyone:r,sasl:{acl_infra_solr_principal}:cdrwa,sasl:{acl_logsearch_principal}:cdrwa')
+  logsearch_properties['logsearch.solr.zk.acls'] = logsearch_acls
+  logsearch_properties['logsearch.solr.audit.logs.zk.acls'] = logsearch_acls
+  if 'logsearch.config.zk_acls' not in logsearch_properties:
+    logsearch_properties['logsearch.config.zk_acls'] = logsearch_acls
+  if 'logsearch.config.zk_acls' not in logfeeder_properties:
+    logfeeder_properties['logsearch.config.zk_acls'] = logsearch_acls
+
 #####################################
 # Smoke command
 #####################################
 
-logsearch_server_hosts = default('/configurations/clusterHostInfo/logsearch_server_hosts', None)
+logsearch_server_hosts = default('/clusterHostInfo/logsearch_server_hosts', None)
 logsearch_server_host = ""
 if logsearch_server_hosts is not None and len(logsearch_server_hosts) > 0:
   logsearch_server_host = logsearch_server_hosts[0]
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
index c81268c..b794036 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/service_check.py
@@ -27,9 +27,12 @@
     env.set_params(params)
 
     try:
-      Execute(params.smoke_logsearch_cmd, user=params.logsearch_user,
-              tries=15, try_sleep=5, timeout=10)
-      Logger.info('Log Search Server up and running')
+      if params.logsearch_server_host:
+        Execute(params.smoke_logsearch_cmd, user=params.logsearch_user,
+                tries=15, try_sleep=5, timeout=10)
+        Logger.info('Log Search Server up and running')
+      else:
+        Logger.info('No Log Search Portal is installed on the cluster, thus no service check is required')
     except:
       Logger.error('Log Search Server not running')
       raise
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
index e6e55b9..653d604 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logfeeder.py
@@ -99,6 +99,10 @@
        encoding="utf-8"
        )
 
+  File(format("{logsearch_logfeeder_conf}/global.config.json"),
+       content=Template("global.config.json.j2")
+       )
+
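For readers new to the resource_management DSL: File declares a managed file on the agent host and Template renders the named .j2 file from the package's templates directory against the params module. A minimal sketch of the same pattern (the encoding argument is an assumption, added to match the neighboring File resources):

    from resource_management.core.resources.system import File
    from resource_management.core.source import Template
    from resource_management.libraries.functions.format import format

    # Render templates/global.config.json.j2 into the Log Feeder conf dir.
    File(format("{logsearch_logfeeder_conf}/global.config.json"),
         content=Template("global.config.json.j2"),
         encoding="utf-8"  # assumption: not passed in the patch above
         )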
   File(format("{logsearch_logfeeder_conf}/input.config-ambari.json"),
        content=InlineTemplate(params.logfeeder_ambari_config_content),
        encoding="utf-8"
@@ -109,17 +113,6 @@
        encoding="utf-8"
        )
 
-  for file_name in params.logfeeder_default_config_file_names:
-    File(format("{logsearch_logfeeder_conf}/" + file_name),
-         content=Template(file_name + ".j2")
-         )
-
-  File(format("{logsearch_logfeeder_conf}/input.config-logfeeder-custom.json"), action='delete')
-  for service, pattern_content in params.logfeeder_metadata.iteritems():
-    File(format("{logsearch_logfeeder_conf}/input.config-" + service.replace('-logsearch-conf', '') + ".json"),
-      content=InlineTemplate(pattern_content, extra_imports=[default])
-    )
-
   if params.logfeeder_system_log_enabled:
     File(format("{logsearch_logfeeder_conf}/input.config-system_messages.json"),
          content=params.logfeeder_system_messages_content
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/HadoopServiceConfig.json.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/HadoopServiceConfig.json.j2
index efa51fa..293a772 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/HadoopServiceConfig.json.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/HadoopServiceConfig.json.j2
@@ -15,84 +15,491 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #}
- {
-   "service": {
-{% if logsearch_metadata and logsearch_metadata.keys()|length > 0 %}
-{%   for metadata_key, component_list in logsearch_metadata.iteritems() %}
-    "{{ metadata_key.lower() }}": {
-      "label": "{{ metadata_key }}",
+{
+  "service": {
+    "accumulo": {
+      "label": "Accumulo",
       "components": [
-{%     for component in component_list %}
         {
-          "name": "{{ component }}"
-        }{% if not loop.last %},{% endif %}
-
-{%     endfor %}
+          "name": "accumulo_gc"
+        },
+        {
+          "name": "accumulo_master"
+        },
+        {
+          "name": "accumulo_monitor"
+        },
+        {
+          "name": "accumulo_tracer"
+        },
+        {
+          "name": "accumulo_tserver"
+        }
       ],
       "dependencies": [
       ]
-    }
-     ,
-{%   endfor %}
-{% endif %}
-{% if logfeeder_system_log_enabled %}
-     "system" : {
-       "label" : "System",
-       "components" : [
+    },
+    "atlas": {
+      "label": "Atlas",
+      "components": [
+        {
+          "name": "atlas_app"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "ambari": {
+      "label": "Ambari",
+      "components": [
+        {
+          "name": "ambari_agent"
+        },
+        {
+          "name": "ambari_server"
+        },
+        {
+          "name": "ambari_alerts"
+        },
+        {
+          "name": "ambari_audit"
+        },
+        {
+          "name": "ambari_config_changes"
+        },
+        {
+          "name": "ambari_eclipselink"
+        },
+        {
+          "name": "ambari_server_check_database"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "ams": {
+      "label": "AMS",
+      "components": [
+        {
+          "name": "ams_hbase_master"
+        },
+        {
+          "name": "ams_hbase_regionserver"
+        },
+        {
+          "name": "ams_collector"
+        },
+        {
+          "name": "ams_monitor"
+        },
+        {
+          "name": "ams_grafana"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "falcon": {
+      "label": "Falcon",
+      "components": [
+        {
+          "name": "falcon_app"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "flume": {
+      "label": "Flume",
+      "components": [
+        {
+          "name": "flume_handler"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "hbase": {
+      "label": "HBase",
+      "components": [
+        {
+          "name": "hbase_master"
+        },
+        {
+          "name": "hbase_regionserver"
+        },
+        {
+          "name": "hbase_phoenix_server"
+        }
+      ],
+      "dependencies": [
+        {
+          "service": "hdfs",
+          "components": [
+            "hdfs_namenode"
+          ]
+        }
+      ]
+    },
+    "hdfs": {
+      "label": "HDFS",
+      "components": [
+        {
+          "name": "hdfs_datanode"
+        },
+        {
+          "name": "hdfs_namenode"
+        },
+        {
+          "name": "hdfs_journalnode"
+        },
+        {
+          "name": "hdfs_secondarynamenode"
+        },
+        {
+          "name": "hdfs_zkfc"
+        },
+        {
+          "name": "hdfs_nfs3"
+        },
+        {
+          "name": "hdfs_audit",
+          "rowtype": "audit"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "hive": {
+      "label": "Hive",
+      "components": [
+        {
+          "name": "hive_hiveserver2"
+        },
+        {
+          "name": "hive_metastore"
+        },
+        {
+          "name": "webhcat_server"
+        }
+      ],
+      "dependencies": [
+        {
+          "service": "hdfs",
+          "components": [
+            "hdfs_namenode"
+          ]
+        }
+      ]
+    },
+    "infra" : {
+      "label" : "Infra",
+      "components": [
+        {
+          "name": "infra_solr"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "kafka": {
+      "label": "Kafka",
+      "components": [
+        {
+          "name": "kafka_controller"
+        },
+        {
+          "name": "kafka_request"
+        },
+        {
+          "name": "kafka_logcleaner"
+        },
+        {
+          "name": "kafka_server"
+        },
+        {
+          "name": "kafka_statechange"
+        }
+      ],
+      "dependencies": [
+        {
+          "service": "zookeeper",
+          "components": [
+            "zookeeper"
+          ]
+        }
+      ]
+    },
+    "knox": {
+      "label": "Knox",
+      "components": [
+        {
+          "name": "knox_gateway"
+        },
+        {
+          "name": "knox_cli"
+        },
+        {
+          "name": "knox_ldap"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "nifi": {
+      "label": "NiFi",
+      "components": [
+        {
+          "name": "nifi_app"
+        },
+        {
+          "name": "nifi_bootstrap"
+        },
+        {
+          "name": "nifi_setup"
+        },
+        {
+          "name": "nifi_user"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "mapred": {
+      "label": "MapReduce",
+      "components": [
+        {
+          "name": "mapred_historyserver"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "logsearch": {
+      "label": "Logsearch",
+      "components": [
+        {
+          "name": "logsearch_app"
+        },
+        {
+          "name": "logsearch_feeder"
+        },
+        {
+          "name": "logsearch_perf"
+        },
+        {
+          "name": "logsearch_solr"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "ranger": {
+      "label": "Ranger",
+      "components": [
+        {
+          "name": "ranger_admin"
+        },
+        {
+          "name": "ranger_dbpatch"
+        },
+        {
+          "name": "ranger_kms"
+        },
+        {
+          "name": "ranger_usersync"
+        }
+      ],
+      "dependencies": [
+        {
+          "service": "hdfs",
+          "required": "optional",
+          "components": [
+            "hdfs_namenode"
+          ]
+        },
+        {
+          "service": "hbase",
+          "required": "optional",
+          "components": [
+            "hbase_master",
+            "hbase_regionserver"
+          ]
+        },
+        {
+          "service": "hive",
+          "required": "optional",
+          "components": [
+            "hive_hiveserver2"
+          ]
+        },
+        {
+          "service": "kafka",
+          "required": "optional",
+          "components": [
+            "kafka_ranger"
+          ]
+        },
+        {
+          "service": "knox",
+          "required": "optional",
+          "components": [
+            "knox_gateway"
+          ]
+        },
+        {
+          "service": "storm",
+          "required": "optional",
+          "components": [
+            "storm_supervisor"
+          ]
+        },
+        {
+          "service": "yarn",
+          "required": "optional",
+          "components": [
+            "yarn_resourcemanager"
+          ]
+        }
+      ]
+    },
+    "oozie": {
+      "label": "Oozie",
+      "components": [
+        {
+          "name": "oozie_app"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "hst": {
+      "label": "SmartSense",
+      "components": [
+        {
+          "name": "hst_server"
+        },
+        {
+          "name": "hst_agent"
+        },
+        {
+          "name": "activity_analyzer"
+        },
+        {
+          "name": "activity_explorer"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "spark": {
+      "label": "Spark",
+      "components": [
+        {
+          "name": "spark_jobhistory_server"
+        },
+        {
+          "name": "spark_thriftserver"
+        },
+        {
+          "name": "livy_server"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "spark2": {
+      "label": "Spark 2",
+      "components": [
+        {
+          "name": "spark2_jobhistory_server"
+        },
+        {
+          "name": "spark2_thriftserver"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "storm": {
+      "label": "Storm",
+      "components": [
+        {
+          "name": "storm_drpc"
+        },
+        {
+          "name": "storm_logviewer"
+        },
+        {
+          "name": "storm_nimbus"
+        },
+        {
+          "name": "storm_supervisor"
+        },
+        {
+          "name": "storm_ui"
+        },
+        {
+          "name": "storm_worker"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "yarn": {
+      "label": "YARN",
+      "components": [
+        {
+          "name": "yarn_nodemanager"
+        },
+        {
+          "name": "yarn_resourcemanager"
+        },
+        {
+          "name": "yarn_timelineserver"
+        },
+        {
+          "name": "yarn_historyserver"
+        },
+        {
+          "name": "yarn_jobsummary"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "zeppelin": {
+      "label": "Zeppelin",
+      "components": [
+        {
+          "name": "zeppelin"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "zookeeper": {
+      "label": "ZooKeeper",
+      "components": [
+        {
+          "name": "zookeeper"
+        }
+      ],
+      "dependencies": [
+      ]
+    },
+    "System": {
+      "label": "System",
+      "components": [
         {
           "name": "system_message"
         },
         {
           "name": "secure_log"
         }
-       ],
-       "dependencies": [
-       ]
-     },
-{% endif %}
-     "ambari": {
-       "label": "Ambari",
-       "components": [
-         {
-           "name": "ambari_agent"
-         },
-         {
-           "name": "ambari_server"
-         },
-         {
-           "name": "ambari_alerts"
-         },
-         {
-           "name": "ambari_audit"
-         },
-         {
-           "name": "ambari_config_changes"
-         },
-         {
-           "name": "ambari_eclipselink"
-         },
-         {
-           "name": "ambari_server_check_database"
-         }
-       ],
-       "dependencies": [
-       ]
-     },
-     "logsearch": {
-       "label": "Logsearch",
-       "components": [
-       {
-         "name": "logsearch_app"
-       },
-       {
-         "name": "logsearch_feeder"
-       },
-       {
-         "name": "logsearch_perf"
-       }
-       ],
-       "dependencies": [
-       ]
-      }
+      ],
+      "dependencies": [
+      ]
+    }
   }
-}
\ No newline at end of file
+}
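Since the template is now mostly static JSON, the rendered output is easy to sanity-check by loading it and listing the components declared for each service — a hypothetical helper, not part of the patch:

    import json

    with open('HadoopServiceConfig.json') as f:  # hypothetical path to the rendered template
        service_map = json.load(f)['service']

    for name, meta in sorted(service_map.items()):
        components = [c['name'] for c in meta['components']]
        print('%s (%s): %s' % (name, meta['label'], ', '.join(components)))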
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/input.config-logsearch.json.j2 b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/input.config-logsearch.json.j2
index ea91405..25bfa63 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/input.config-logsearch.json.j2
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/templates/input.config-logsearch.json.j2
@@ -20,17 +20,17 @@
     {
       "type":"logsearch_app",
       "rowtype":"service",
-      "path":"{{logsearch_log_dir}}/logsearch.json"
+      "path":"{{default('/configurations/logsearch-env/logsearch_log_dir', '/var/log/ambari-logsearch-portal')}}/logsearch.json"
     },
     {
       "type":"logsearch_feeder",
       "rowtype":"service",
-      "path":"{{logfeeder_log_dir}}/logsearch-logfeeder.json"
+      "path":"{{default('/configurations/logfeeder-env/logfeeder_log_dir', '/var/log/ambari-logsearch-logfeeder')}}/logsearch-logfeeder.json"
     },
     {
       "type":"logsearch_perf",
       "rowtype":"service",
-      "path":"{{logsearch_log_dir}}/logsearch-performance.json"
+      "path":"{{default('/configurations/logfeeder-env/logfeeder_log_dir', '/var/log/ambari-logsearch-logfeeder')}}/logsearch-performance.json"
     }
   ],
   "filter":[
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
index 0adcbde..d36d89c 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
@@ -87,7 +87,7 @@
       "configuration-layout": "default",
       "configs": [
         {
-          "config" : "logsearch-properties/logsearch.logfeeder.include.default.level",
+          "config" : "logfeeder-properties/logfeeder.include.default.level",
           "subsection-name": "subsection-logsearch-server-col1"
         },
         {
@@ -353,7 +353,7 @@
         }
       },
       {
-        "config": "logsearch-properties/logsearch.logfeeder.include.default.level",
+        "config": "logfeeder-properties/logfeeder.include.default.level",
         "widget": {
           "type": "text-field"
         }
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-logsearch-conf.xml
deleted file mode 100644
index 2f13d3f..0000000
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Oozie</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>OOZIE_SERVER:oozie_app</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"oozie_app",
-      "rowtype":"service",
-      "path":"{{default('/configurations/oozie-env/oozie_log_dir', '/var/log/oozie')}}/oozie.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "oozie_app"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{DATA:logger_name}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index a8b2cf4..9320bc3 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -105,69 +105,6 @@
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class OozieServerDefault(OozieServer):
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations = {
-        "oozie-site":
-          build_expectations('oozie-site',
-                             {
-                               "oozie.authentication.type": "kerberos",
-                               "oozie.service.AuthorizationService.security.enabled": "true",
-                               "oozie.service.HadoopAccessorService.kerberos.enabled": "true"
-                             },
-                             [
-                               "local.realm",
-                               "oozie.authentication.kerberos.principal",
-                               "oozie.authentication.kerberos.keytab",
-                               "oozie.service.HadoopAccessorService.kerberos.principal",
-                               "oozie.service.HadoopAccessorService.keytab.file"
-                             ],
-                             None)
-      }
-
-      security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                   {'oozie-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('oozie-site' not in security_params
-              or 'oozie.authentication.kerberos.principal' not in security_params['oozie-site']
-              or 'oozie.authentication.kerberos.keytab' not in security_params['oozie-site']
-              or 'oozie.service.HadoopAccessorService.kerberos.principal' not in security_params['oozie-site']
-              or 'oozie.service.HadoopAccessorService.keytab.file' not in security_params['oozie-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.oozie_user,
-                                security_params['oozie-site']['oozie.authentication.kerberos.keytab'],
-                                security_params['oozie-site']['oozie.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.oozie_user,
-                                security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
-                                security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     """
     Performs the tasks that should be done before an upgrade of oozie. This includes:
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/input.config-oozie.json.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/input.config-oozie.json.j2
new file mode 100644
index 0000000..4a54f74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/input.config-oozie.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"oozie_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/oozie-env/oozie_log_dir', '/var/log/oozie')}}/oozie.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "oozie_app"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{DATA:logger_name}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json
new file mode 100644
index 0000000..a1d267f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/alerts.json
@@ -0,0 +1,45 @@
+{
+  "OOZIE": {
+    "service": [],
+    "OOZIE_SERVER": [
+      {
+        "name": "oozie_server_webui",
+        "label": "Oozie Server Web UI",
+        "description": "This host-level alert is triggered if the Oozie server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{oozie-site/oozie.base.url}}/?user.name={{oozie-env/oozie_user}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "oozie_server_status",
+        "label": "Oozie Server Status",
+        "description": "This host-level alert is triggered if the Oozie server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py"
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml
new file mode 100644
index 0000000..0f67356
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-env.xml
@@ -0,0 +1,255 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>oozie_user</name>
+    <display-name>Oozie User</display-name>
+    <value>oozie</value>
+    <property-type>USER</property-type>
+    <description>Oozie User.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_admin_users</name>
+    <value>{oozie_user}, oozie-admin</value>
+    <description>Oozie admin users.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>oozie_data_dir</name>
+    <value>/hadoop/oozie/data</value>
+    <display-name>Oozie Data Dir</display-name>
+    <description>Data directory in which the Oozie DB exists</description>
+    <value-attributes>
+      <type>directory</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <value>/var/log/oozie</value>
+    <display-name>Oozie Log Dir</display-name>
+    <description>Directory for oozie logs</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_tmp_dir</name>
+    <value>/var/tmp/oozie</value>
+    <display-name>Oozie Tmp Dir</display-name>
+    <description>Directory for oozie temporary files</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <value>/var/run/oozie</value>
+    <display-name>Oozie PID Dir</display-name>
+    <description>Directory in which the pid files for oozie reside.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <value>11001</value>
+    <display-name>Oozie Server Admin Port</display-name>
+    <description>The admin port the Oozie server runs on.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_heapsize</name>
+    <value>2048</value>
+    <description>Oozie heap size.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_permsize</name>
+    <value>256</value>
+    <description>Oozie permanent generation size.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for OOZIE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for OOZIE user.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- oozie-env.sh -->
+
+  <property>
+    <name>service_check_job_name</name>
+    <value>no-op</value>
+    <description>
+      Job name from Oozie examples that will be executed at each Oozie service check action.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+
+  <property>
+    <name>content</name>
+    <display-name>oozie-env template</display-name>
+    <description>This is the jinja template for oozie-env.sh file</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-{{conf_dir}}}
+  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+{% if java_version &lt; 8 %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"
+{% else %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}}"
+{% endif %}
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port the Oozie server runs on
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port the Oozie server runs on
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name the Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-{{architecture}}-64
+
+# At least 1 minute of retry time to account for server downtime during
+# upgrade/downgrade
+export OOZIE_CLIENT_OPTS="${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 "
+
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
+
+# Set Hadoop-related properties
+export HADOOP_OPTS="-Dhdp.version=${HDP_VERSION} ${HADOOP_OPTS}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <value>New Derby Database</value>
+    <display-name>Oozie Database</display-name>
+    <description>Oozie Server Database.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>New Derby Database</value>
+          <label>New Derby</label>
+        </entry>
+        <entry>
+          <value>Existing MySQL / MariaDB Database</value>
+          <label>Existing MySQL / MariaDB</label>
+        </entry>
+        <entry>
+          <value>Existing PostgreSQL Database</value>
+          <label>Existing PostgreSQL</label>
+        </entry>
+        <entry>
+          <value>Existing Oracle Database</value>
+          <label>Existing Oracle</label>
+        </entry>
+        <entry>
+          <value>Existing SQL Anywhere Database</value>
+          <label>Existing SQL Anywhere</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml
new file mode 100644
index 0000000..005cc0e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-log4j.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+
+  <property>
+    <name>oozie_log_maxhistory</name>
+    <value>720</value>
+    <description>The number of hours for which log files will be retained</description>
+    <display-name>Oozie Log: # Hours of Log Retention</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>oozie-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+# The appender that Oozie uses must be named 'oozie' (i.e. log4j.appender.oozie)
+
+# Using the RollingFileAppender with the OozieRollingPolicy will roll the log file every hour and retain up to MaxHistory number of
+# log files. If FileNamePattern ends with ".gz" it will create gzip files.
+log4j.appender.oozie=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.oozie.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+# The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+# value of log4j.appender.oozie.File
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd-HH}
+# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable
+log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}
+
+log4j.appender.oozieError=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.oozieError.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+log4j.appender.oozieError.File=${oozie.log.dir}/oozie-error.log
+log4j.appender.oozieError.Append=true
+log4j.appender.oozieError.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieError.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+# The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+# value of log4j.appender.oozieError.File
+log4j.appender.oozieError.RollingPolicy.FileNamePattern=${log4j.appender.oozieError.File}-%d{yyyy-MM-dd-HH}
+# The MaxHistory controls how many log files will be retained (720 hours / 24 hours per day = 30 days); -1 to disable
+log4j.appender.oozieError.RollingPolicy.MaxHistory=720
+log4j.appender.oozieError.filter.1 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.1.levelToMatch = WARN
+log4j.appender.oozieError.filter.2 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.2.levelToMatch = ERROR
+log4j.appender.oozieError.filter.3 = org.apache.log4j.varia.LevelMatchFilter
+log4j.appender.oozieError.filter.3.levelToMatch = FATAL
+log4j.appender.oozieError.filter.4 = org.apache.log4j.varia.DenyAllFilter
+
+# Uncomment the below two lines to use the DailyRollingFileAppender instead
+# The DatePattern must end with either "dd" or "HH"
+#log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie, oozieError
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=WARN, oozie
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml
new file mode 100644
index 0000000..f68369a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/configuration/oozie-site.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
+
+  <property>
+    <name>oozie.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed.
+      This setting is meaningful only when using 'simple' authentication.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for the Oozie HTTP endpoint; the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials</value>
+    <description>
+      Credential classes to be used for HCat and Hive2.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*={{hadoop_conf_dir}}</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is resolved within
+      the Oozie configuration directory, though the path can be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
+    <value>false</value>
+    <description>
+      Indicates if Oozie is configured to use Kerberos.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler
+    </value>
+    <description>
+      Lists the URI handlers supported for data availability checks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.services.ext</name>
+    <value>
+      org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <display-name>Database Name</display-name>
+    <description>
+      Oozie database name.
+    </description>
+    <value-attributes>
+      <type>database</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <display-name>Database Username</display-name>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value/>
+    <display-name>Database Password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a one-space string; the service trims the
+      value, and if it is empty the Configuration assumes it is NULL.
+    </description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+      <keystore>true</keystore>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <display-name>JDBC Driver Class</display-name>
+    <description>
+      JDBC driver class.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>oozie-env</type>
+        <name>oozie_database</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <display-name>Database URL</display-name>
+    <description>
+      JDBC URL.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>oozie-env</type>
+        <name>oozie_database</name>
+      </property>
+      <property>
+        <type>oozie-site</type>
+        <name>oozie.db.schema.name</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
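+  <!-- Illustrative: when a non-Derby database is selected above, this URL is
+       rewritten accordingly, e.g. something like
+       jdbc:mysql://dbhost/${oozie.db.schema.name} for MySQL (hypothetical host). -->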
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.service.SparkConfigurationService.spark.configurations</name>
+    <value>*={{spark_conf_dir}}</value>
+    <description>
+      Comma separated AUTHORITY=SPARK_CONF_DIR, where AUTHORITY is the
+      HOST:PORT of the ResourceManager of a YARN cluster. The wildcard '*'
+      configuration is used when there is no exact match for an authority.
+      The SPARK_CONF_DIR contains the relevant spark-defaults.conf properties
+      file. If the path is relative, it is resolved within the Oozie configuration
+      directory, though the path can be absolute. This is only used when the
+      Spark master is set to either "yarn-client" or "yarn-cluster".
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>oozie.action.retry.interval</name>
+    <value>30</value>
+    <description>
+      The interval between retries of an action in case of failure
+    </description>
+    <value-attributes>
+      <type>custom</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json
new file mode 100644
index 0000000..f1092f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/kerberos.json
@@ -0,0 +1,70 @@
+{
+  "services": [
+    {
+      "name": "OOZIE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "oozie-site/oozie.authentication.kerberos.name.rules"
+      ],
+      "configurations": [
+        {
+          "oozie-site": {
+            "oozie.authentication.type": "kerberos",
+            "oozie.service.AuthorizationService.authorization.enabled": "true",
+            "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+            "local.realm": "${realm}",
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
+            "oozie.zookeeper.secure" : "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "OOZIE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "oozie_server",
+              "principal": {
+                "value": "oozie/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
+                "local_username" : "${oozie-env/oozie_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/oozie.service.keytab",
+                "owner": {
+                  "name": "${oozie-env/oozie_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml
new file mode 100644
index 0000000..d351cbe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/metainfo.xml
@@ -0,0 +1,203 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <credential-store>
+        <supported>true</supported>
+        <enabled>true</enabled>
+      </credential-store>
+      <displayName>Oozie</displayName>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.2.0.3.0</version>
+      <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <displayName>Oozie Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>oozie_app</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <displayName>Oozie Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>oozie-site.xml</fileName>
+              <dictionaryName>oozie-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-env.sh</fileName>
+              <dictionaryName>oozie-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-log4j.properties</fileName>
+              <dictionaryName>oozie-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_mysql_connector</condition>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>oozie_${stack_version}</name>
+            </package>
+            <package>
+              <name>falcon_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>oozie-${stack_version}</name>
+            </package>
+            <package>
+              <name>falcon-${stack_version}</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <configuration-dependencies>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
+        <config-type>oozie-log4j</config-type>
+        <config-type>yarn-site</config-type>
+        <config-type>hive-site</config-type>
+        <config-type>tez-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>core-site</config-type>
+        <config-type>application-properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py
new file mode 100644
index 0000000..0e9fe74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/alerts/alert_check_oozie_server.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+import re
+
+from resource_management.core import global_lock
+from resource_management.core.environment import Environment
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_klist_path
+from ambari_commons.os_check import OSConst, OSCheck
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from urlparse import urlparse
+
+STACK_ROOT_PATTERN = "{{ stack_root }}"
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+if OSCheck.is_windows_family():
+  OOZIE_ENV_HTTPS_RE = r"set\s+OOZIE_HTTPS_PORT=(\d+)"
+else:
+  OOZIE_ENV_HTTPS_RE = r"export\s+OOZIE_HTTPS_PORT=(\d+)"
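+# e.g. matches "export OOZIE_HTTPS_PORT=11443" and captures the port (illustrative value)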
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
+SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
+OOZIE_USER = '{{oozie-env/oozie_user}}'
+OOZIE_CONF_DIR = "{0}/current/oozie-server/conf".format(STACK_ROOT_PATTERN)
+OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
+OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
+OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
+
+USER_KEYTAB_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.keytab.file}}'
+USER_PRINCIPAL_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.kerberos.principal}}'
+USER_KEY = '{{oozie-env/oozie_user}}'
+
+# default keytab location
+USER_KEYTAB_SCRIPT_PARAM_KEY = 'default.oozie.keytab'
+USER_KEYTAB_DEFAULT = '/etc/security/keytabs/oozie.headless.keytab'
+
+# default user principal
+USER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.oozie.principal'
+USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
+
+# default user
+USER_DEFAULT = 'oozie'
+
+STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
+STACK_ROOT_DEFAULT = '/usr/hdp'
+
+class KerberosPropertiesNotFound(Exception): pass
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY,)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_check_command(oozie_url, host_name, configurations):
+  from resource_management.libraries.functions import reload_windows_env
+  reload_windows_env()
+  oozie_home = os.environ['OOZIE_HOME']
+  oozie_cmd = os.path.join(oozie_home, 'bin', 'oozie.cmd')
+  command = format("cmd /c {oozie_cmd} admin -oozie {oozie_url} -status")
+  return (command, None, None)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_check_command(oozie_url, host_name, configurations, parameters, only_kinit):
+  kerberos_env = None
+
+  user = USER_DEFAULT
+  if USER_KEY in configurations:
+    user = configurations[USER_KEY]
+
+  if is_security_enabled(configurations):
+    # defaults
+    user_keytab = USER_KEYTAB_DEFAULT
+    user_principal = USER_PRINCIPAL_DEFAULT
+
+    # check script params
+    if USER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+      user_principal = parameters[USER_PRINCIPAL_SCRIPT_PARAM_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+      user_keytab = parameters[USER_KEYTAB_SCRIPT_PARAM_KEY]
+
+    # check configurations last as they should always take precedence
+    if USER_PRINCIPAL_KEY in configurations:
+      user_principal = configurations[USER_PRINCIPAL_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_KEY in configurations:
+      user_keytab = configurations[USER_KEYTAB_KEY]
+
+    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+    # when executing curl
+    env = Environment.get_instance()
+    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
+    kerberos_env = {'KRB5CCNAME': ccache_file}
+
+    # Get the configured Kerberos executable search paths, if any
+    kerberos_executable_search_paths = None
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+
+    klist_path_local = get_klist_path(kerberos_executable_search_paths)
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinit_part_command = format("{kinit_path_local} -l 5m20s -c {ccache_file} -kt {user_keytab} {user_principal}; ")
+
+    # Determine if we need to kinit by testing whether the relevant cache exists and has
+    # non-expired tickets. Tickets are marked to expire after 5 minutes to reduce the number
+    # of kinits we do while still recovering quickly when keytabs are regenerated.
+
+    if only_kinit:
+      kinit_command = kinit_part_command
+    else:
+      kinit_command = "{0} -s {1} || ".format(klist_path_local, ccache_file) + kinit_part_command
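+    # illustrative composed command (hypothetical ccache path):
+    #   klist -s /tmp/oozie_alert_cc_1234 || kinit -l 5m20s -c /tmp/oozie_alert_cc_1234 \
+    #     -kt /etc/security/keytabs/oozie.headless.keytab oozie@EXAMPLE.COM;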
+
+    # prevent concurrent kinit
+    kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+    kinit_lock.acquire()
+    try:
+      Execute(kinit_command, environment=kerberos_env, user=user)
+    finally:
+      kinit_lock.release()
+
+  # Configure stack root
+  stack_root = STACK_ROOT_DEFAULT
+  if STACK_ROOT_KEY in configurations:
+    stack_root = configurations[STACK_ROOT_KEY].lower()
+
+  # oozie configuration directory using a symlink
+  oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
+  if not os.path.exists(oozie_config_directory):
+    oozie_config_directory = OOZIE_CONF_DIR_LEGACY
+
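+  # illustrative resulting command (hypothetical paths and URL):
+  #   source /usr/hdp/current/oozie-server/conf/oozie-env.sh ; oozie admin -oozie http://host:11000/oozie -status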
+  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
+    oozie_config_directory, oozie_url)
+
+  return (command, kerberos_env, user)
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  if not OOZIE_URL_KEY in configurations:
+    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
+
+  https_port = None
+  # try to get the https port from the oozie-env content
+  if OOZIE_ENV_CONTENT in configurations:
+    for line in configurations[OOZIE_ENV_CONTENT].splitlines():
+      result = re.match(OOZIE_ENV_HTTPS_RE, line)
+
+      if result is not None:
+        https_port = result.group(1)
+  # or from oozie-site.xml
+  if https_port is None and OOZIE_HTTPS_PORT in configurations:
+    https_port = configurations[OOZIE_HTTPS_PORT]
+
+  oozie_url = configurations[OOZIE_URL_KEY]
+
+  # construct proper url for https
+  if https_port is not None:
+    parsed_url = urlparse(oozie_url)
+    oozie_url = oozie_url.replace(parsed_url.scheme, "https")
+    if parsed_url.port is None:
+      oozie_url = oozie_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+    else:
+      oozie_url = oozie_url.replace(str(parsed_url.port), str(https_port))
+
+  # https will not work with a localhost address, so we need to use the FQDN
+  if https_port is None:
+    oozie_url = oozie_url.replace(urlparse(oozie_url).hostname, host_name)
+
+  (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, False)
+
+  # sometimes the real ticket lifetime is shorter than what we request (5m20s as of now),
+  # so we double-check with a forced re-kinit to be sure the failure is not a ticket-lifetime problem
+  if is_security_enabled(configurations) and code == RESULT_CODE_CRITICAL:
+    (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, True)
+
+  return (code, msg)
+
+
+def get_check_result(oozie_url, host_name, configurations, parameters, only_kinit):
+  try:
+    command, env, user = get_check_command(oozie_url, host_name, configurations, parameters, only_kinit)
+    # execute the command
+    Execute(command, environment=env, user=user)
+
+    return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
+  except KerberosPropertiesNotFound, ex:
+    return (RESULT_CODE_UNKNOWN, [str(ex)])
+  except Exception, ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+
+def is_security_enabled(configurations):
+  security_enabled = False
+  if SECURITY_ENABLED in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
+
+  return security_enabled
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..60716ae
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/oozieSmoke2.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export os_family=$1
+export oozie_lib_dir=$2
+export oozie_conf_dir=$3
+export oozie_bin_dir=$4
+export oozie_server_url=$5
+export oozie_examples_dir=$6
+export hadoop_conf_dir=$7
+export hadoop_bin_dir=$8
+export smoke_test_user=$9
+export job_name=${10}
+export security_enabled=${11}
+export smoke_user_keytab=${12}
+export kinit_path_local=${13}
+export smokeuser_principal=${14}
+
+function checkOozieJobStatus {
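+# Poll the Oozie server until the job leaves the RUNNING state, waiting 15s between polls.
+# usage: checkOozieJobStatus <job_id> [num_of_tries]   (num_of_tries defaults to 10)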
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  /var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the counter and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export OOZIE_EXIT_CODE=0
+export OOZIE_SERVER=$oozie_server_url
+
+cd $oozie_examples_dir
+
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else 
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $oozie_examples_dir/examples/apps/${job_name}/job.properties  -run"
+echo $cmd
+job_info=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id" 15
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh
new file mode 100644
index 0000000..f2bee2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/prepareOozieHdfsDirectories.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export oozie_conf_dir=$1
+export oozie_examples_dir=$2
+export hadoop_conf_dir=$3
+export JOBTRACKER=$4
+export NAMENODE=$5
+export QUEUE=$6
+export JOB_NAME=$7
+
+cd $oozie_examples_dir
+
+/var/lib/ambari-agent/ambari-sudo.sh tar -zxf oozie-examples.tar.gz
+/var/lib/ambari-agent/ambari-sudo.sh chmod -R o+rx examples
+
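+# Point the example job.properties at this cluster; illustratively, with
+# NAMENODE=hdfs://nn.example.com:8020 (hypothetical), the line
+# "nameNode=hdfs://localhost:8020" becomes "nameNode=hdfs://nn.example.com:8020".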
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|queueName=default|queueName=$QUEUE|g" examples/apps/$JOB_NAME/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/$JOB_NAME/job.properties
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..36576b5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
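+# Run ooziedb.sh and treat a "DB schema exists" failure as success so repeated
+# create/upgrade runs stay idempotent.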
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py
new file mode 100644
index 0000000..7c69779
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/check_oozie_server_status.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def check_oozie_server_status():
+  import status_params
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+  check_windows_service_status(status_params.oozie_server_win_service_name)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def check_oozie_server_status():
+  import status_params
+  from resource_management.libraries.functions.check_process_status import check_process_status
+
+  check_process_status(status_params.pid_file)
+
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
new file mode 100644
index 0000000..def0545
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie.py
@@ -0,0 +1,516 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+import re
+
+# Resource Management Imports
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.functions.copy_tarball import get_current_version
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.core.resources.packaging import Package
+from resource_management.core.shell import as_user, as_sudo, call
+from resource_management.core.exceptions import Fail
+
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons.constants import SERVICE, UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.libraries.functions.constants import Direction
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from ambari_commons.inet_utils import download_file
+
+from resource_management.core import Logger
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def oozie(is_server=False):
+  import params
+
+  from status_params import oozie_server_win_service_name
+
+  XmlConfig("oozie-site.xml",
+            conf_dir=params.oozie_conf_dir,
+            configurations=params.config['configurations']['oozie-site'],
+            owner=params.oozie_user,
+            mode='f',
+            configuration_attributes=params.config['configuration_attributes']['oozie-site']
+  )
+
+  File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"),
+       owner=params.oozie_user,
+       content=InlineTemplate(params.oozie_env_cmd_template)
+  )
+
+  Directory(params.oozie_tmp_dir,
+            owner=params.oozie_user,
+            create_parents = True,
+  )
+
+  if is_server:
+    # Manually overriding service logon user & password set by the installation package
+    ServiceConfig(oozie_server_win_service_name,
+                  action="change_user",
+                  username = params.oozie_user,
+                  password = Script.get_password(params.oozie_user))
+
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
+  )
+  webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
+  if os.path.isfile(webapps_sqljdbc_path):
+    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                        webapps_sqljdbc_path
+    )
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
+  )
+  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
+                      os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
+  )
+
+# TODO: see if we can remove this
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie(is_server=False):
+  import params
+
+  if is_server:
+    params.HdfsResource(params.oozie_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.oozie_user,
+                         mode=params.oozie_hdfs_user_mode
+    )
+    params.HdfsResource(None, action="execute")
+  Directory(params.conf_dir,
+             create_parents = True,
+             owner = params.oozie_user,
+             group = params.user_group
+  )
+
+  params.oozie_site = update_credential_provider_path(params.oozie_site,
+                                                      'oozie-site',
+                                                      os.path.join(params.conf_dir, 'oozie-site.jceks'),
+                                                      params.oozie_user,
+                                                      params.user_group
+                                                      )
+
+  XmlConfig("oozie-site.xml",
+    conf_dir = params.conf_dir,
+    configurations = params.oozie_site,
+    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template),
+    group=params.user_group,
+  )
+
+  # On some OSes this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents=True,
+            owner='root',
+            group='root'
+  )
+
+  File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("oozie.conf.j2")
+  )
+
+  if (params.log4j_props != None):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=InlineTemplate(params.log4j_props)
+    )
+  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user
+    )
+
+  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
+    File(format("{params.conf_dir}/adminusers.txt"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
+    )
+  else:
+    File ( format("{params.conf_dir}/adminusers.txt"),
+           owner = params.oozie_user,
+           group = params.user_group
+    )
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+     params.jdbc_driver_name == "org.postgresql.Driver" or \
+     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+    )
+
+  oozie_ownership()
+  
+  if is_server:      
+    oozie_server_specific()
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+def oozie_server_specific():
+  import params
+  
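+  # no_op_test succeeds only if the pid file exists and the process it names is still alive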
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+  
+  File(params.pid_file,
+    action="delete",
+    not_if=no_op_test
+  )
+  
+  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0755,
+    create_parents = True,
+    cd_access="a",
+  )
+  
+  Directory(params.oozie_libext_dir,
+            create_parents = True,
+  )
+  
+  hashcode_file = format("{oozie_home}/.hashcode")
+  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
+
+  untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
+
+  Execute( untar_sharelib,    # time-expensive
+    not_if  = format("{no_op_test} || {skip_recreate_sharelib}"), 
+    sudo = True,
+  )
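+  # Passing the command as a tuple makes Execute run it without shell
+  # interpretation of the arguments; sudo=True runs it with elevated privileges.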
+
+  configure_cmds = []
+  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
+  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  
+  Execute( configure_cmds,
+    not_if  = no_op_test,
+    sudo = True,
+  )
+  
+  Directory(params.oozie_webapps_conf_dir,
+            owner = params.oozie_user,
+            group = params.user_group,
+            recursive_ownership = True,
+            recursion_follow_links = True,
+  )
+
+  # download the database JAR
+  download_database_library_if_needed()
+
+  # Falcon EL extension
+  if params.has_falcon_host:
+    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
+      not_if  = no_op_test)
+
+    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
+      not_if  = no_op_test)
+
+  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
+    Package(params.all_lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+      not_if  = no_op_test,
+    )
+
+  prepare_war(params)
+
+  File(hashcode_file,
+       mode = 0644,
+  )
+
+  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
+    # Create hive-site and tez-site configs for oozie
+    Directory(params.hive_conf_dir,
+        create_parents = True,
+        owner = params.oozie_user,
+        group = params.user_group
+    )
+    if 'hive-site' in params.config['configurations']:
+      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
+                                                         'hive-site',
+                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
+                                                         params.oozie_user,
+                                                         params.user_group
+                                                         )
+      XmlConfig("hive-site.xml",
+        conf_dir=params.hive_conf_dir,
+        configurations=hive_site_config,
+        configuration_attributes=params.config['configuration_attributes']['hive-site'],
+        owner=params.oozie_user,
+        group=params.user_group,
+        mode=0644
+      )
+    if 'tez-site' in params.config['configurations']:
+      XmlConfig( "tez-site.xml",
+        conf_dir = params.hive_conf_dir,
+        configurations = params.config['configurations']['tez-site'],
+        configuration_attributes=params.config['configuration_attributes']['tez-site'],
+        owner = params.oozie_user,
+        group = params.user_group,
+        mode = 0664
+      )
+
+    # If Atlas is also installed, need to generate Atlas Hive hook (hive-atlas-application.properties file) in directory
+    # {stack_root}/{current_version}/atlas/hook/hive/
+    # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
+    # However, should still save the file on this host so that can upload it to the Oozie Sharelib in DFS.
+    if has_atlas_in_cluster():
+      atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
+      Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
+      setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)
+
+  Directory(params.oozie_server_dir,
+    owner = params.oozie_user,
+    group = params.user_group,
+    recursive_ownership = True,  
+  )
+  if params.security_enabled:
+    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
+         owner=params.oozie_user,
+         group=params.user_group,
+         content=Template("zkmigrator_jaas.conf.j2")
+         )
+
+def __parse_sharelib_from_output(output):
+  """
+  Return the parent directory of the first path in the output of the "oozie admin -shareliblist $comp" command.
+  The output will match a pattern like:
+
+  Potential errors
+  [Available ShareLib]
+  hive
+    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar
+    hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar
+  """
+  if output is not None:
+    pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
+    m = pattern.search(output)
+    if m and len(m.groups()) == 1:
+      jar_path = m.group(1)
+      # Remove leading/trailing spaces and get the containing directory
+      sharelib_dir = os.path.dirname(jar_path.strip())
+      return sharelib_dir
+  return None
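+
+# Illustrative example (hypothetical values, not executed by this module):
+#   out = "[Available ShareLib]\nhive\n  hdfs://nn:8020/user/oozie/share/lib/lib_1/hive/a.jar"
+#   __parse_sharelib_from_output(out)  # -> "hdfs://nn:8020/user/oozie/share/lib/lib_1/hive"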
+
+def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=None):
+  """
+  If the Atlas Hive Hook directory is present, Atlas is installed, and this is the first Oozie Server,
+  then copy the entire contents of that directory to the Oozie Sharelib in DFS, e.g.,
+  /usr/$stack/$current_version/atlas/hook/hive/ -> hdfs:///user/oozie/share/lib/lib_$timestamp/hive
+
+  :param upgrade_type: If in the middle of a stack upgrade, the type as UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
+  :param upgrade_direction: If in the middle of a stack upgrade, the direction as Direction.UPGRADE or Direction.DOWNGRADE.
+  """
+  import params
+
+  # Calculate the effective version since this code can also be called during EU/RU in the upgrade direction.
+  effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
+  if not check_stack_feature(StackFeature.ATLAS_HOOK_SUPPORT, effective_version):
+    return
+    
+  # Important that oozie_server_hostnames is sorted by name so that this only runs on a single Oozie server.
+  if not (len(params.oozie_server_hostnames) > 0 and params.hostname == params.oozie_server_hostnames[0]):
+    Logger.debug("Will not attempt to copy Atlas Hive hook to DFS since this is not the first Oozie Server "
+                 "sorted by hostname.")
+    return
+
+  if not has_atlas_in_cluster():
+    Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since Atlas is not installed on the cluster.")
+    return
+
+  if upgrade_type is not None and upgrade_direction == Direction.DOWNGRADE:
+    Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since in the middle of Rolling/Express upgrade "
+                 "and performing a Downgrade.")
+    return
+
+  current_version = get_current_version()
+  atlas_hive_hook_dir = format("{stack_root}/{current_version}/atlas/hook/hive/")
+  if not os.path.exists(atlas_hive_hook_dir):
+    Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
+                        "contain directory {atlas_hive_hook_dir}"))
+    return
+
+  atlas_hive_hook_impl_dir = os.path.join(atlas_hive_hook_dir, "atlas-hive-plugin-impl")
+
+  num_files = len([name for name in os.listdir(atlas_hive_hook_impl_dir) if os.path.exists(os.path.join(atlas_hive_hook_impl_dir, name))])
+  Logger.info("Found %d files/directories inside Atlas Hive hook impl directory %s"% (num_files, atlas_hive_hook_impl_dir))
+
+  # This can return over 100 files, so take the first 5 lines after "Available ShareLib"
+  # Use -oozie http(s)://localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+  command = format(r'source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -shareliblist hive | grep "\[Available ShareLib\]" -A 5')
+
+  # Execute() does not capture stdout, so use checked_call() to obtain the output
+  # that is parsed below. Imported locally in case the module header (outside
+  # this hunk) does not already import it.
+  from resource_management.core.shell import checked_call
+  code, out = checked_call(command,
+          user=params.oozie_user,
+          tries=10,
+          try_sleep=5,
+          logoutput=True,
+  )
+
+  hive_sharelib_dir = __parse_sharelib_from_output(out)
+
+  if hive_sharelib_dir is None:
+    raise Fail("Could not parse Hive sharelib from output.")
+
+  Logger.info(format("Parsed Hive sharelib = {hive_sharelib_dir} and will attempt to copy/replace {num_files} files to it from {atlas_hive_hook_impl_dir}"))
+
+  params.HdfsResource(hive_sharelib_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      source=atlas_hive_hook_impl_dir,
+                      user=params.hdfs_user,
+                      owner=params.oozie_user,
+                      group=params.hdfs_user,
+                      mode=0755,
+                      recursive_chown=True,
+                      recursive_chmod=True,
+                      replace_existing_files=True
+                      )
+
+  Logger.info("Copying Atlas Hive hook properties file to Oozie Sharelib in DFS.")
+  atlas_hook_filepath_source = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
+  atlas_hook_file_path_dest_in_dfs = os.path.join(hive_sharelib_dir, params.atlas_hook_filename)
+  params.HdfsResource(atlas_hook_file_path_dest_in_dfs,
+                      type="file",
+                      source=atlas_hook_filepath_source,
+                      action="create_on_execute",
+                      owner=params.oozie_user,
+                      group=params.hdfs_user,
+                      mode=0755,
+                      replace_existing_files=True
+                      )
+  params.HdfsResource(None, action="execute")
+
+  # Update the sharelib after making any changes
+  # Use -oozie http(s)://localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+  Execute(format("source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -sharelibupdate"),
+          user=params.oozie_user,
+          tries=5,
+          try_sleep=5,
+          logoutput=True,
+  )
+
+
+def download_database_library_if_needed(target_directory = None):
+  """
+  Downloads the library to use when connecting to the Oozie database, if
+  necessary. The library will be downloaded to 'params.target' unless
+  otherwise specified.
+  :param target_directory: the location where the database library will be
+  downloaded to.
+  :return:
+  """
+  import params
+  jdbc_drivers = ["com.mysql.jdbc.Driver",
+    "com.microsoft.sqlserver.jdbc.SQLServerDriver",
+    "oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
+
+  # check to see if the JDBC driver name is in the list of ones that need to
+  # be downloaded
+  if params.jdbc_driver_name not in jdbc_drivers or not params.jdbc_driver_jar:
+    return
+
+  if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
+    File(params.previous_jdbc_jar, action='delete')
+
+  # if the target directory is not specified
+  if target_directory is None:
+    target_jar_with_directory = params.target
+  else:
+    # create the full path using the supplied target directory and the JDBC JAR
+    target_jar_with_directory = os.path.join(target_directory, params.jdbc_driver_jar)
+
+  if not os.path.exists(target_jar_with_directory):
+    File(params.downloaded_custom_connector,
+      content = DownloadSource(params.driver_curl_source))
+
+    if params.sqla_db_used:
+      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
+
+      Execute(untar_sqla_type2_driver, sudo = True)
+
+      Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))
+
+      Directory(params.jdbc_libs_dir,
+                create_parents = True)
+
+      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
+
+      Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))
+
+    else:
+      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
+        path=["/bin", "/usr/bin/"],
+        sudo = True)
+
+    File(target_jar_with_directory, owner = params.oozie_user,
+      group = params.user_group)
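+
+# Usage note: oozie_server_upgrade.py calls this with an explicit versioned
+# target, e.g. (hypothetical path)
+#   download_database_library_if_needed(target_directory="<stack_root>/<version>/oozie/libext")
+# while oozie_server_specific() above relies on the params.target default.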
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py
new file mode 100644
index 0000000..f98ecfd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_client.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+
+class OozieClient(Script):
+
+  def get_component_name(self):
+    return "oozie-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # the stack does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    Logger.info("Executing Oozie Client Stack Upgrade pre-restart")
+    conf_select.select(params.stack_name, "oozie", params.version)
+    stack_select.select("oozie-client", params.version)
+
+  # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
+  # This function returns changed configs (it's used for config generation before config download)
+  def generate_configs_get_xml_file_content(self, filename, dictionary):
+    if dictionary == 'oozie-site':
+      import params
+      config = self.get_config()
+      return {'configurations': params.oozie_site,
+              'configuration_attributes': config['configuration_attributes'][dictionary]}
+    else:
+      return super(OozieClient, self).generate_configs_get_xml_file_content(filename, dictionary)
+
+if __name__ == "__main__":
+  OozieClient().execute()
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py
new file mode 100644
index 0000000..9320bc3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core import Logger
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+
+from oozie import oozie
+from oozie_service import oozie_service
+from oozie_server_upgrade import OozieUpgrade
+
+from check_oozie_server_status import check_oozie_server_status
+from resource_management.core.resources.zkmigrator import ZkMigrator
+
+class OozieServer(Script):
+
+  def get_component_name(self):
+    return "oozie-server"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+
+    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
+    if upgrade_type is None:
+      upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+
+    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
+      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
+      if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+        # In order for the "<stack-root>/current/oozie-<client/server>" point to the new version of
+        # oozie, we need to create the symlinks both for server and client.
+        # This is required as both need to be pointing to new installed oozie version.
+
+        # Sets the symlink : eg: <stack-root>/current/oozie-client -> <stack-root>/a.b.c.d-<version>/oozie
+        stack_select.select("oozie-client", params.version)
+        # Sets the symlink : eg: <stack-root>/current/oozie-server -> <stack-root>/a.b.c.d-<version>/oozie
+        stack_select.select("oozie-server", params.version)
+
+      if params.version and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version):
+        conf_select.select(params.stack_name, "oozie", params.version)
+
+    env.set_params(params)
+    oozie(is_server=True)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+
+    # preparing the WAR file must run after configure since configure writes out
+    # oozie-env.sh which is needed to have the right environment directories set up!
+    if upgrade_type is not None:
+      OozieUpgrade.prepare_warfile()
+
+    oozie_service(action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop', upgrade_type=upgrade_type)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_oozie_server_status()
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServerDefault(OozieServer):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Performs the tasks that should be done before an upgrade of oozie. This includes:
+      - backing up configurations
+      - running <stack-selector-tool> and <conf-selector-tool>
+      - restoring configurations
+      - preparing the libext directory
+    :param env:
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # the stack does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    Logger.info("Executing Oozie Server Stack Upgrade pre-restart")
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "oozie", params.version)
+      stack_select.select("oozie-server", params.version)
+
+    OozieUpgrade.prepare_libext_directory()
+
+  def disable_security(self, env):
+    import params
+    if not params.stack_supports_zk_security:
+      Logger.info("Stack doesn't support zookeeper security")
+      return
+    if not params.zk_connection_string:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    zkmigrator = ZkMigrator(params.zk_connection_string, params.java_exec, params.java64_home, params.jaas_file, params.oozie_user)
+    zkmigrator.set_acls(params.zk_namespace if params.zk_namespace.startswith('/') else '/' + params.zk_namespace, 'world:anyone:crdwa')
+
+  def get_log_folder(self):
+    import params
+    return params.oozie_log_dir
+  
+  def get_user(self):
+    import params
+    return params.oozie_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_file]
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServerWindows(OozieServer):
+  pass
+
+if __name__ == "__main__":
+  OozieServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py
new file mode 100644
index 0000000..402c7cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_server_upgrade.py
@@ -0,0 +1,237 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import glob
+import os
+import shutil
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+import oozie
+
+BACKUP_TEMP_DIR = "oozie-upgrade-backup"
+BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
+
+class OozieUpgrade(Script):
+
+  @staticmethod
+  def prepare_libext_directory():
+    """
+    Performs the following actions on libext:
+      - creates <stack-root>/current/oozie/libext and sets 777 permissions on it
+      - downloads the JDBC driver JAR if needed
+      - copies the Falcon JAR for the Oozie WAR if needed
+    """
+    import params
+
+    # some stack versions don't need the lzo compression libraries
+    target_version_needs_compression_libraries = params.version and check_stack_feature(StackFeature.LZO, params.version)
+
+    # ensure the directory exists
+    Directory(params.oozie_libext_dir, mode = 0777)
+
+    # get all hadooplzo* JAR files
+    # <stack-selector-tool> set hadoop-client has not run yet, therefore we cannot use
+    # <stack-root>/current/hadoop-client ; we must use params.version directly
+    # however, this only works when upgrading beyond 2.2.0.0; don't do this
+    # for downgrade to 2.2.0.0 since hadoop-lzo will not be present
+    # This can also be called during a Downgrade.
+    # When a version is Installed, it is responsible for downloading the hadoop-lzo packages
+    # if lzo is enabled.
+    if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
+      hadoop_lzo_pattern = 'hadoop-lzo*.jar'
+      hadoop_client_new_lib_dir = format("{stack_root}/{version}/hadoop/lib")
+
+      # glob.glob (rather than iglob) returns a list, so the emptiness check
+      # below actually fires; an iterator would always be truthy
+      files = glob.glob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+      if not files:
+        raise Fail("There are no files at {0} matching {1}".format(
+          hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+      # copy files into libext
+      files_copied = False
+      for file in files:
+        if os.path.isfile(file):
+          Logger.info("Copying {0} to {1}".format(str(file), params.oozie_libext_dir))
+          shutil.copy2(file, params.oozie_libext_dir)
+          files_copied = True
+
+      if not files_copied:
+        raise Fail("There are no files at {0} matching {1}".format(
+          hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+    # copy ext ZIP to libext dir
+    oozie_ext_zip_file = params.ext_js_path
+
+    # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
+    oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
+
+    if not os.path.isfile(oozie_ext_zip_file):
+      raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
+
+    Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_dir))
+    Execute(("cp", oozie_ext_zip_file, params.oozie_libext_dir), sudo=True)
+    Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
+    File(oozie_ext_zip_target_path,
+         mode=0644
+    )
+
+    # Redownload jdbc driver to a new current location
+    oozie.download_database_library_if_needed()
+
+    # get the upgrade version in the event that it's needed
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # copy the Falcon JAR if needed; falcon has not upgraded yet, so we must
+    # use the versioned falcon directory
+    if params.has_falcon_host:
+      versioned_falcon_jar_directory = "{0}/{1}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(params.stack_root, stack_version)
+      Logger.info("Copying {0} to {1}".format(versioned_falcon_jar_directory, params.oozie_libext_dir))
+
+      Execute(format('{sudo} cp {versioned_falcon_jar_directory} {oozie_libext_dir}'))
+      Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'))
+
+
+  @staticmethod
+  def prepare_warfile():
+    """
+    Invokes the 'prepare-war' command in Oozie in order to create the WAR.
+    The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
+    outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
+    both of these environment variables must point to the upgraded oozie-server path and
+    not oozie-client since it was not yet updated.
+
+    This method will also perform a kinit if necessary.
+    :return:
+    """
+    import params
+
+    # get the kerberos token if necessary to execute commands as oozie
+    if params.security_enabled:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+      command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+      Execute(command, user=params.oozie_user, logoutput=True)
+
+    prepare_war(params)
+
+
+  def upgrade_oozie_database_and_sharelib(self, env):
+    """
+    Performs the creation and upload of the sharelib and the upgrade of the
+    database. This method will also perform a kinit if necessary.
+    It is run before the upgrade of oozie begins exactly once as part of the
+    upgrade orchestration.
+
+    Since this runs before the upgrade has occurred, it should not use any
+    "current" directories since they will still be pointing to the older
+    version of Oozie. Instead, it should use versioned directories to ensure
+    that the commands running are from the oozie version about to be upgraded to.
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    Logger.info("Will upgrade the Oozie database")
+
+    # get the kerberos token if necessary to execute commands as oozie
+    if params.security_enabled:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+      command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+      Execute(command, user=params.oozie_user, logoutput=True)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # upgrade oozie DB
+    Logger.info(format('Upgrading the Oozie database, using version {stack_version}'))
+
+    # the database upgrade requires the db driver JAR, but since we have
+    # not yet run <stack-selector-tool> to update the "current" pointers, we have to use
+    # the versioned libext directory as the location of the driver JAR
+    versioned_libext_dir = "{0}/{1}/oozie/libext".format(params.stack_root, stack_version)
+    oozie.download_database_library_if_needed(target_directory=versioned_libext_dir)
+
+    database_upgrade_command = "{0}/{1}/oozie/bin/ooziedb.sh upgrade -run".format(params.stack_root, stack_version)
+    Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
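+    # With a hypothetical stack_root of /usr/hdp and stack_version 2.6.0.0-1234,
+    # this runs /usr/hdp/2.6.0.0-1234/oozie/bin/ooziedb.sh upgrade -run as the
+    # oozie user against the database configured in oozie-site.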
+
+    # install new sharelib to HDFS
+    self.create_sharelib(env)
+
+
+  def create_sharelib(self, env):
+    """
+    Performs the creation and upload of the sharelib.
+    This method will also perform a kinit if necessary.
+    It is run before the upgrade of oozie begins exactly once as part of the
+    upgrade orchestration.
+
+    Since this runs before the upgrade has occurred, it should not use any
+    "current" directories since they will still be pointing to the older
+    version of Oozie. Instead, it should use versioned directories to ensure
+    that the commands running are from the oozie version about to be upgraded to.
+    :param env:
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    Logger.info('Creating a new sharelib and uploading it to HDFS...')
+
+    # ensure the oozie directory exists for the sharelib
+    params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+      action = "create_on_execute",
+      type = "directory",
+      owner = params.oozie_user,
+      group = params.user_group,
+      mode = 0755,
+      recursive_chmod = True)
+
+    params.HdfsResource(None, action = "execute")
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+      raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+    stack_version = upgrade_stack[1]
+
+    # install new sharelib to HDFS
+    sharelib_command = "{0}/{1}/oozie/bin/oozie-setup.sh sharelib create -fs {2}".format(
+      params.stack_root, stack_version, params.fs_root)
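+    # With hypothetical values this expands to something like:
+    #   /usr/hdp/2.6.0.0-1234/oozie/bin/oozie-setup.sh sharelib create -fs hdfs://nn.example.com:8020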
+
+    Execute(sharelib_command, user=params.oozie_user, logoutput=True)
+
+if __name__ == "__main__":
+  OozieUpgrade().execute()
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py
new file mode 100644
index 0000000..5fcbf45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/oozie_service.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from oozie import copy_atlas_hive_hook_to_dfs_share_lib
+
+# Resource Management Imports
+from resource_management.core import shell, sudo
+from resource_management.core.shell import as_user
+from resource_management.core.logger import Logger
+from resource_management.core.resources.service import Service
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def oozie_service(action='start', upgrade_type=None):
+  import params
+
+  if action == 'start':
+    cmd = format("cmd /C \"cd /d {oozie_tmp_dir} && {oozie_home}\\bin\\ooziedb.cmd create -sqlfile oozie.sql -run\"")
+    Execute(cmd, user=params.oozie_user, ignore_failures=True)
+    Service(params.oozie_server_win_service_name, action="start")
+  elif action == 'stop':
+    Service(params.oozie_server_win_service_name, action="stop")
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie_service(action = 'start', upgrade_type=None):
+  """
+  Starts or stops the Oozie service.
+  :param action: 'start' or 'stop'
+  :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"; when
+  set, some start-up steps are skipped since a variation of them was already
+  performed during the rolling upgrade
+  :return:
+  """
+  import params
+
+  environment={'OOZIE_CONFIG': params.conf_dir}
+
+  if params.security_enabled:
+    if params.oozie_principal is None:
+      oozie_principal_with_host = 'missing_principal'
+    else:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
+  else:
+    kinit_if_needed = ""
+
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
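+  # as_user wraps the probe so it runs as the oozie user; the expansion is
+  # roughly (hypothetical): ambari-sudo.sh su oozie -l -s /bin/bash -c
+  # 'ls <pid_file> ... && ps -p `cat <pid_file>` ...'. It succeeds only when
+  # the pid file exists and the process it names is alive.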
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+    path_to_jdbc = params.target
+
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+       params.jdbc_driver_name == "org.postgresql.Driver" or \
+       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+
+      if not params.jdbc_driver_jar:
+        # parenthesized so the string concatenation clearly binds before the
+        # ternary; guard against a driver name missing from the map
+        path_to_jdbc = (format("{oozie_libext_dir}/") +
+                        params.default_connectors_map[params.jdbc_driver_name]) \
+                       if params.jdbc_driver_name in params.default_connectors_map else None
+        if path_to_jdbc is None or not os.path.isfile(path_to_jdbc):
+          path_to_jdbc = format("{oozie_libext_dir}/") + "*"
+          error_message = "Error! Cannot find a JDBC driver with the default name " + \
+                params.default_connectors_map.get(params.jdbc_driver_name, params.jdbc_driver_name) + \
+                " in the Oozie lib dir, so the db connection check may fail. Please run " \
+                "'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the Ambari server host."
+          Logger.error(error_message)
+
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+
+    if upgrade_type is None:
+      if not os.path.isfile(path_to_jdbc) and params.jdbc_driver_name == "org.postgresql.Driver":
+        print format("ERROR: jdbc file {target} is unavailable. Please follow these steps:\n" \
+          "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create the needed directory: mkdir -p {oozie_home}/libserver/\n" \
+          "3) Copy postgresql-9.0-801.jdbc4.jar to the newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+          "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+          "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+        exit(1)
+
+      if db_connection_check_command:
+        sudo.chmod(params.check_db_connection_jar, 0755)
+        Execute( db_connection_check_command, 
+                 tries=5, 
+                 try_sleep=10,
+                 user=params.oozie_user,
+        )
+
+      Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"), 
+               user = params.oozie_user, not_if = no_op_test,
+               ignore_failures = True 
+      )
+      
+      if params.security_enabled:
+        Execute(kinit_if_needed,
+                user = params.oozie_user,
+        )
+
+      if params.sysprep_skip_copy_oozie_share_lib_to_hdfs:
+        Logger.info("Skipping creation of oozie sharelib as host is sys prepped")
+        # Copy current hive-site to hdfs:/user/oozie/share/lib/spark/
+        params.HdfsResource(format("{hdfs_share_dir}/lib/spark/hive-site.xml"),
+                            action="create_on_execute",
+                            type = 'file',
+                            mode=0444,
+                            owner=params.oozie_user,
+                            group=params.user_group,
+                            source=format("{hive_conf_dir}/hive-site.xml"),
+                            )
+        params.HdfsResource(None, action="execute")
+
+        hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
+      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # checking with webhdfs is much faster than executing hadoop fs -ls
+        util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
+        list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        hdfs_share_dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do the time-expensive hadoop fs -ls check; the awk command
+        # exits 0 when the share dir is found, so exit code 0 means "exists"
+        hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+                                 user=params.oozie_user)[0] == 0
+                                 
+      if not hdfs_share_dir_exists:                      
+        Execute( params.put_shared_lib_to_hdfs_cmd, 
+                 user = params.oozie_user,
+                 path = params.execute_path 
+        )
+        params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+                             type="directory",
+                             action="create_on_execute",
+                             mode=0755,
+                             recursive_chmod=True,
+        )
+        params.HdfsResource(None, action="execute")
+        
+
+    try:
+      # start oozie
+      Execute( start_cmd, environment=environment, user = params.oozie_user,
+        not_if = no_op_test )
+
+      copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type, params.upgrade_direction)
+    except:
+      show_logs(params.oozie_log_dir, params.oozie_user)
+      raise
+
+  elif action == 'stop':
+    Directory(params.oozie_tmp_dir,
+              owner=params.oozie_user,
+              create_parents = True,
+    )
+
+    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh stop 60 -force")
+
+    try:
+      # stop oozie
+      Execute(stop_cmd, environment=environment, only_if  = no_op_test,
+        user = params.oozie_user)
+    except:
+      show_logs(params.oozie_log_dir, params.oozie_user)
+      raise
+
+    File(params.pid_file, action = "delete")
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..f39d632
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# By default, copy the tarballs to HDFS. If the cluster is sysprepped, then set based on the config.
+sysprep_skip_copy_oozie_share_lib_to_hdfs = False
+if host_sys_prepped:
+  sysprep_skip_copy_oozie_share_lib_to_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_oozie_share_lib_to_hdfs", False)
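+
+# Example of the resulting behavior (hypothetical config): on a sys-prepped host
+# with cluster-env/sysprep_skip_copy_oozie_share_lib_to_hdfs set to true, the
+# sharelib upload in oozie_service.py is skipped and only hive-site.xml is
+# refreshed in the DFS sharelib.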
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..d30a465
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+
+from resource_management.core.utils import PasswordString
+from ambari_commons.credential_store_helper import get_password_from_credential_store
+from urlparse import urlparse
+
+import status_params
+import os
+import re
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+architecture = get_architecture()
+
+
+# Needed since this writes out the Atlas Hive Hook config file.
+cluster_name = config['clusterName']
+
+hostname = config["hostname"]
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = status_params.stack_name
+stack_name_uppercase = stack_name.upper()
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+stack_root = status_params.stack_root
+stack_version_unformatted =  status_params.stack_version_unformatted
+stack_version_formatted =  status_params.stack_version_formatted
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+#spark_conf
+spark_conf_dir = format("{stack_root}/current/spark-client/conf")
+
+#hadoop params
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,stack_version_formatted):
+  stack_version = None
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
+    stack_version = upgrade_stack[1]
+
+  # oozie-server or oozie-client, depending on role
+  oozie_root = status_params.component_directory
+
+  # using the correct oozie root dir, format the correct location
+  oozie_lib_dir = format("{stack_root}/current/{oozie_root}")
+  oozie_setup_sh = format("{stack_root}/current/{oozie_root}/bin/oozie-setup.sh")
+  oozie_webapps_dir = format("{stack_root}/current/{oozie_root}/oozie-server/webapps")
+  oozie_webapps_conf_dir = format("{stack_root}/current/{oozie_root}/oozie-server/conf")
+  oozie_libext_dir = format("{stack_root}/current/{oozie_root}/libext")
+  oozie_server_dir = format("{stack_root}/current/{oozie_root}/oozie-server")
+  oozie_shared_lib = format("{stack_root}/current/{oozie_root}/share")
+  oozie_home = format("{stack_root}/current/{oozie_root}")
+  oozie_bin_dir = format("{stack_root}/current/{oozie_root}/bin")
+  oozie_examples_regex = format("{stack_root}/current/{oozie_root}/doc")
+
+  # set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
+  # matches the version of oozie
+  falcon_home = format("{stack_root}/current/falcon-client")
+  if stack_version is not None:
+    falcon_home = '{0}/{1}/falcon'.format(stack_root, stack_version)
+
+  conf_dir = format("{stack_root}/current/{oozie_root}/conf")
+  hive_conf_dir = format("{conf_dir}/action-conf/hive")
+
+else:
+  oozie_lib_dir = "/var/lib/oozie"
+  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+  oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
+  oozie_libext_dir = "/usr/lib/oozie/libext"
+  oozie_server_dir = "/var/lib/oozie/oozie-server"
+  oozie_shared_lib = "/usr/lib/oozie/share"
+  oozie_home = "/usr/lib/oozie"
+  oozie_bin_dir = "/usr/bin"
+  falcon_home = '/usr/lib/falcon'
+  conf_dir = "/etc/oozie/conf"
+  hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
+  oozie_examples_regex = "/usr/share/doc/oozie-*"
+
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_hdfs_user_mode = 0770
+service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
+
+# This config actually contains {oozie_user}
+oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = default("configurations/oozie-env/oozie_tmp_dir", "/var/tmp/oozie")
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+java_share_dir = "/usr/share/java"
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+ext_js_file = "ext-2.2.zip"
+ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
+oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
+
+limits_conf_dir = "/etc/security/limits.d"
+
+oozie_user_nofile_limit = default('/configurations/oozie-env/oozie_user_nofile_limit', 32000)
+oozie_user_nproc_limit = default('/configurations/oozie-env/oozie_user_nproc_limit', 16000)
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
+oozie_site = config['configurations']['oozie-site']
+# Need this for yarn.nodemanager.recovery.dir in yarn-site
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
+zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
+zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
+jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+
+credential_store_enabled = False
+if 'credentialStoreEnabled' in config:
+  credential_store_enabled = config['credentialStoreEnabled']
+
+if security_enabled:
+  oozie_site = dict(config['configurations']['oozie-site'])
+  oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
+
+  # If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
+  # use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
+  # that any special principal name needed for HA is used rather than the Ambari-generated value
+  if "oozie.ha.authentication.kerberos.principal" in oozie_site:
+    oozie_site['oozie.authentication.kerberos.principal'] = oozie_site['oozie.ha.authentication.kerberos.principal']
+    http_principal = oozie_site['oozie.authentication.kerberos.principal']
+
+  # If a user-supplied oozie.ha.authentication.kerberos.keytab property exists in oozie-site,
+  # use it to replace the existing oozie.authentication.kerberos.keytab value. This is to ensure
+  # that any special keytab file needed for HA is used rather than the Ambari-generated value
+  if "oozie.ha.authentication.kerberos.keytab" in oozie_site:
+    oozie_site['oozie.authentication.kerberos.keytab'] = oozie_site['oozie.ha.authentication.kerberos.keytab']
+
+  if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
+    #older versions of oozie have problems when using _HOST in principal
+    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
+    oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
+
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+
+if credential_store_enabled:
+  if 'hadoop.security.credential.provider.path' in config['configurations']['oozie-site']:
+    cs_lib_path = config['configurations']['oozie-site']['credentialStoreClassPath']
+    java_home = config['hostLevelParams']['java_home']
+    alias = 'oozie.service.JPAService.jdbc.password'
+    provider_path = config['configurations']['oozie-site']['hadoop.security.credential.provider.path']
+    oozie_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
+  else:
+    raise Exception("hadoop.security.credential.provider.path property must be set when the credential store is enabled")
+else:
+  oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template \
+    or 'oozie.https.port' in config['configurations']['oozie-site'] \
+    or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] \
+    or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
+  oozie_secure = '-secure'
+else:
+  oozie_secure = ''
+
+https_port = None
+# try to get the https port from the oozie-env content
+for line in oozie_env_sh_template.splitlines():
+  result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
+  if result is not None:
+    https_port = result.group(1)
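+# e.g. a line "export OOZIE_HTTPS_PORT=11443" in the template yields https_port = "11443" (illustrative port)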
+# or from oozie-site.xml
+if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
+  https_port = config['configurations']['oozie-site']['oozie.https.port']
+
+oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
+
+service_check_job_name = default("/configurations/oozie-env/service_check_job_name", "no-op")
+
+# construct proper url for https
+if https_port is not None:
+  parsed_url = urlparse(oozie_base_url)
+  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
+  if parsed_url.port is None:
+    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+  else:
+    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
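+  # e.g. (illustrative) "http://c6402.ambari.apache.org:11000/oozie" with https_port "11443"
+  # becomes "https://c6402.ambari.apache.org:11443/oozie"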
+
+oozie_setup_sh_current = oozie_setup_sh
+
+hdfs_site = config['configurations']['hdfs-site']
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
+if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_SETUP_SHARED_LIB, stack_version_formatted):
+  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+else:
+  # for older stack versions, copy the shared lib archive to HDFS manually
+  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+
+default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
+                           "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
+                           "org.postgresql.Driver":"postgresql-jdbc.jar",
+                           "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
+                           "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+# NOTE: keying this off the JDBC driver class name is fragile, because class paths
+# can change between driver versions; keying off the database type may be more robust.
+sqla_db_used = False
+previous_jdbc_jar_name = None
+if jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+elif jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+elif jdbc_driver_name == "org.postgresql.Driver":
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")  #oozie using it's own postgres jdbc
+  previous_jdbc_jar_name = None
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+elif jdbc_driver_name == "sap.jdbc4.sqlanywhere.IDriver":
+  jdbc_driver_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+  sqla_db_used = True
+else:
+  jdbc_driver_jar = ""
+  jdbc_symlink_name = ""
+  previous_jdbc_jar_name = None
+
+default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+driver_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+if jdbc_driver_name == "org.postgresql.Driver":
+  target = jdbc_driver_jar
+  previous_jdbc_jar = None
+else:
+  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
+  previous_jdbc_jar = format("{oozie_libext_dir}/{previous_jdbc_jar_name}")
+
+#constants for type2 jdbc
+jdbc_libs_dir = format("{oozie_libext_dir}/native/lib64")
+lib_dir_available = os.path.exists(jdbc_libs_dir)
+
+if sqla_db_used:
+  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+
+hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+
+oozie_server_hostnames = default("/clusterHostInfo/oozie_server", [])
+oozie_server_hostnames = sorted(oozie_server_hostnames)
+
+oozie_log_maxhistory = default('/configurations/oozie-log4j/oozie_log_maxhistory', 720)
+
+#oozie-log4j.properties
+if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
+  log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+  log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks needed by Hive on Oozie
+hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
+
+if has_atlas_in_cluster():
+  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion
+
+import functools
+# create partial functions with common arguments for every HdfsResource call;
+# code that needs to create/delete an HDFS directory/file or copy from local calls params.HdfsResource
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
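+# illustrative usage from the service scripts (names as used there):
+#   params.HdfsResource(format("/user/{oozie_user}"), type="directory",
+#                       action="create_on_execute", owner=oozie_user)
+#   params.HdfsResource(None, action="execute")  # flushes the queued operations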
+
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+
+# The logic for LZO also exists in HDFS' params.py
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = get_lzo_packages(stack_version_unformatted)
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..1f939d4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+import os
+from status_params import *
+
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+oozie_root = os.environ['OOZIE_ROOT']
+oozie_home = os.environ['OOZIE_HOME']
+oozie_conf_dir = os.path.join(oozie_home,'conf')
+oozie_user = hadoop_user
+oozie_tmp_dir = "c:\\hadoop\\temp\\oozie"
+
+oozie_env_cmd_template = config['configurations']['oozie-env']['content']
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..ae7cb21
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import glob
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile
+from resource_management.core.system import System
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+
+NO_DOCS_FOLDER_MESSAGE = "Cannot find {oozie_examples_regex}. A possible reason is that /etc/yum.conf contains" \
+" tsflags=nodocs, which prevents this folder from being installed along with the oozie-client package." \
+" If this is the case, please fix /etc/yum.conf and re-install the package."
+
+class OozieServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServiceCheckDefault(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    # on HDP1 this file is different
+    prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
+
+  @staticmethod
+  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
+    import params
+
+    File(format("{tmp_dir}/{file_name}"),
+         content=StaticFile(file_name),
+         mode=0755
+    )
+    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
+         content=StaticFile(prepare_hdfs_file_name),
+         mode=0755
+    )
+
+    os_family = System.get_instance().os_family
+    oozie_examples_dir_regex_matches = glob.glob(params.oozie_examples_regex)
+    if not oozie_examples_dir_regex_matches:
+      raise Fail(format(NO_DOCS_FOLDER_MESSAGE))
+    oozie_examples_dir = oozie_examples_dir_regex_matches[0]
+
+    Execute((format("{tmp_dir}/{prepare_hdfs_file_name}"), params.conf_dir, oozie_examples_dir, params.hadoop_conf_dir, params.yarn_resourcemanager_address, params.fs_root, params.service_check_queue_name, params.service_check_job_name),
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    params.HdfsResource(format("/user/{smokeuser}"),
+        type="directory",
+        action="create_on_execute",
+        owner=params.smokeuser,
+        mode=params.smoke_hdfs_user_mode,
+        )
+
+    examples_dir = format('/user/{smokeuser}/examples')
+    params.HdfsResource(examples_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(examples_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+
+    input_data_dir = format('/user/{smokeuser}/input-data')
+    params.HdfsResource(input_data_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(input_data_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples/input-data"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
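+    # a call with None and action="execute" flushes all HDFS operations queued above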
+    params.HdfsResource(None, action="execute")
+
+    if params.security_enabled:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
+    else:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled}")
+
+    Execute(sh_cmd,
+            path=params.execute_path,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServiceCheckWindows(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+    service = "OOZIE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..ce990cf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_CLIENT' : 'oozie-client',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client',
+  'ru_execute_tasks' : 'oozie-server'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  # windows service mapping
+  oozie_server_win_service_name = "oozieservice"
+else:
+  oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+  pid_file = format("{oozie_pid_dir}/oozie.pid")
+
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+  conf_dir = "/etc/oozie/conf"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+  tmp_dir = Script.get_tmp_dir()
+  oozie_user = config['configurations']['oozie-env']['oozie_user']
+  hostname = config["hostname"]
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2
new file mode 100644
index 0000000..2a0f7b2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/adminusers.txt.j2
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using following rules:
+#
+#     One user name per line
+#     Empty lines and lines starting with '#' are ignored
+
+{% if oozie_admin_users %}
+{% for oozie_admin_user in oozie_admin_users.split(',') %}
+{{oozie_admin_user|trim}}
+{% endfor %}
+{% endif %}
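+{# e.g. (illustrative) oozie_admin_users = "oozie, admin" renders as two lines: "oozie" and "admin" #}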
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2
new file mode 100644
index 0000000..4a54f74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/input.config-oozie.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"oozie_app",
+      "rowtype":"service",
+      "path":"{{default('/configurations/oozie-env/oozie_log_dir', '/var/log/oozie')}}/oozie.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "oozie_app"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{DATA:logger_name}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e39428f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,93 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2
new file mode 100644
index 0000000..1f99e49
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/oozie.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{oozie_user}}   - nofile   {{oozie_user_nofile_limit}}
+{{oozie_user}}   - nproc    {{oozie_user_nproc_limit}}
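+
+# e.g. (illustrative) with oozie_user "oozie", a nofile limit of 32000 and an nproc limit of 16000,
+# the rendered file contains:
+#   oozie   - nofile   32000
+#   oozie   - nproc    16000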
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2
new file mode 100644
index 0000000..fbc0ce5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/package/templates/zkmigrator_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  storeKey=true
+  useTicketCache=false
+  keyTab="{{oozie_keytab}}"
+  principal="{{oozie_principal_with_host}}";
+};
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..81e7cbe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,45 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"oozie.https.port",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        },
+        {
+          "property":"oozie.https.keystore.file",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        },
+        {
+          "property":"oozie.https.keystore.pass",
+          "desired":"EXISTS",
+          "site":"oozie-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "oozie_server_ui",
+        "component_name": "OOZIE_SERVER",
+        "label": "Oozie Web UI",
+        "requires_user_name": "true",
+        "url":"%@://%@:%@/oozie?user.name=%@",
+        "port":{
+          "http_property": "oozie.base.url",
+          "http_default_port": "11000",
+          "https_property": "oozie.base.url",
+          "https_default_port": "11443",
+          "regex": "\\w*:(\\d+)",
+          "site": "oozie-site"
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json
new file mode 100644
index 0000000..769e917
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/role_command_order.json
@@ -0,0 +1,9 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for OOZIE",
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVER-RESTART": ["NAMENODE-RESTART"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"]
+
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json
new file mode 100644
index 0000000..5f325f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.3.0/themes/theme.json
@@ -0,0 +1,116 @@
+{
+  "name": "default",
+  "description": "Default theme for Oozie service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "oozie-database",
+            "display-name": "Database",
+            "layout": {
+              "tab-rows": 1,
+              "tab-columns": 1,
+              "sections": [
+                {
+                  "name": "oozie-database-configurations",
+                  "display-name": "Database Configurations",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "0",
+                  "column-span": "0",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "oozie-database-configurations-col-1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "oozie-database-configurations-col-2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "oozie-env/oozie_database",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.db.schema.name",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.username",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.url",
+          "subsection-name": "oozie-database-configurations-col-1"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.driver",
+          "subsection-name": "oozie-database-configurations-col-2"
+        },
+        {
+          "config": "oozie-site/oozie.service.JPAService.jdbc.password",
+          "subsection-name": "oozie-database-configurations-col-2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "oozie-env/oozie_database",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.username",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.driver",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.service.JPAService.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "oozie-site/oozie.db.schema.name",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-logsearch-conf.xml
deleted file mode 100644
index fc26336..0000000
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-logsearch-conf.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Ranger</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>RANGER_SERVER:ranger_admin,ranger_dbpatch;RANGER_USERSYNC:ranger_usersync;</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"ranger_admin",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/xa_portal.log"
-    },
-    {
-      "type":"ranger_dbpatch",
-      "is_enabled":"true",
-      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/ranger_db_patch.log"
-    },
-    {
-      "type":"ranger_usersync",
-      "rowtype":"service",
-      "path":"{{default('/configurations/ranger-env/ranger_usersync_log_dir', '/var/log/ranger/usersync')}}/usersync.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ranger_admin",
-            "ranger_dbpatch"
-          ]
-         }
-       },
-      "log4j_format":"%d [%t] %-5p %C{6} (%F:%L) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ranger_usersync"
-          ]
-         }
-       },
-      "log4j_format":"%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n",
-      "multiline_pattern":"^(%{USER_SYNC_DATE:logtime})",
-      "message_pattern":"(?m)^%{USER_SYNC_DATE:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"dd MMM yyyy HH:mm:ss"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index e56a705..26e6578 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -35,6 +35,7 @@
 from resource_management.core.shell import as_sudo
 from resource_management.libraries.functions import solr_cloud_util
 from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.core.exceptions import ExecutionFailed
 
 # This file contains functions used for setup/configure of Ranger Admin and Ranger Usersync.
 # The design is to mimic what is done by the setup.sh script bundled by Ranger component currently.
@@ -684,70 +685,72 @@
         content=Template("ranger_solr_jaas_conf.j2"),
         owner=params.unix_user
       )
+  try:
+    check_znode()
 
-  check_znode()
+    if params.stack_supports_ranger_solr_configs:
+      Logger.info('Solr configurations supported, creating solr-configurations.')
+      File(format("{ranger_solr_conf}/solrconfig.xml"),
+           content=InlineTemplate(params.ranger_solr_config_content),
+           owner=params.unix_user,
+           group=params.unix_group,
+           mode=0644
+      )
 
-  if params.stack_supports_ranger_solr_configs:
-    Logger.info('Solr configrations supported,creating solr-configurations.')
-    File(format("{ranger_solr_conf}/solrconfig.xml"),
-         content=InlineTemplate(params.ranger_solr_config_content),
-         owner=params.unix_user,
-         group=params.unix_group,
-         mode=0644
-    )
+      solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.ranger_solr_config_set,
+        config_set_dir = params.ranger_solr_conf,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java_home,
+        solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
+        jaas_file=params.solr_jaas_file,
+        retry=30, interval=5
+      )
 
-    solr_cloud_util.upload_configuration_to_zk(
+    else:
+      Logger.info('Solr configurations not supported, skipping solr-configurations.')
+      solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.ranger_solr_config_set,
+        config_set_dir = params.ranger_solr_conf,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java_home,
+        jaas_file=params.solr_jaas_file,
+        retry=30, interval=5)
+
+    if params.security_enabled and params.has_infra_solr \
+      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
+
+      solr_cloud_util.add_solr_roles(params.config,
+                                     roles = [params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                     new_service_principals = [params.ranger_admin_jaas_principal])
+      service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'),
+                                                    ('knox', 'knox'), ('nifi', 'nifi'), ('storm', 'storm'), ('yarn', 'yarn')]
+      service_principals = get_ranger_plugin_principals(service_default_principals_map)
+      solr_cloud_util.add_solr_roles(params.config,
+                                     roles = [params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                     new_service_principals = service_principals)
+
+
+    solr_cloud_util.create_collection(
       zookeeper_quorum = params.zookeeper_quorum,
       solr_znode = params.solr_znode,
+      collection = params.ranger_solr_collection_name,
       config_set = params.ranger_solr_config_set,
-      config_set_dir = params.ranger_solr_conf,
-      tmp_dir = params.tmp_dir,
       java64_home = params.java_home,
-      solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
-      jaas_file=params.solr_jaas_file,
-      retry=30, interval=5
-    )
+      shards = params.ranger_solr_shards,
+      replication_factor = int(params.replication_factor),
+      jaas_file = params.solr_jaas_file)
 
-  else:
-    Logger.info('Solr configrations not supported, skipping solr-configurations.')
-    solr_cloud_util.upload_configuration_to_zk(
-      zookeeper_quorum = params.zookeeper_quorum,
-      solr_znode = params.solr_znode,
-      config_set = params.ranger_solr_config_set,
-      config_set_dir = params.ranger_solr_conf,
-      tmp_dir = params.tmp_dir,
-      java64_home = params.java_home,
-      jaas_file=params.solr_jaas_file,
-      retry=30, interval=5)
-
-  if params.security_enabled and params.has_infra_solr \
-    and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
-
-    solr_cloud_util.add_solr_roles(params.config,
-                                   roles = [params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
-                                   new_service_principals = [params.ranger_admin_jaas_principal])
-    service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'),
-                                                  ('knox', 'knox'), ('nifi', 'nifi'), ('storm', 'storm'), ('yanr', 'yarn')]
-    service_principals = get_ranger_plugin_principals(service_default_principals_map)
-    solr_cloud_util.add_solr_roles(params.config,
-                                   roles = [params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
-                                   new_service_principals = service_principals)
-
-
-  solr_cloud_util.create_collection(
-    zookeeper_quorum = params.zookeeper_quorum,
-    solr_znode = params.solr_znode,
-    collection = params.ranger_solr_collection_name,
-    config_set = params.ranger_solr_config_set,
-    java64_home = params.java_home,
-    shards = params.ranger_solr_shards,
-    replication_factor = int(params.replication_factor),
-    jaas_file = params.solr_jaas_file)
-
-  if params.security_enabled and params.has_infra_solr \
-    and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
-    secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file)
-    secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file)
+    if params.security_enabled and params.has_infra_solr \
+      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
+      secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file)
+      secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file)
+  except ExecutionFailed as execution_exception:
+    Logger.error('Error when configuring Solr for Ranger; please check that the Solr and ZooKeeper services are up and running:\n {0}'.format(execution_exception))
 
 def setup_ranger_admin_passwd_change():
   import params
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/templates/input.config-ranger.json.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/templates/input.config-ranger.json.j2
new file mode 100644
index 0000000..6c5bb1f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/templates/input.config-ranger.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"ranger_admin",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/xa_portal.log"
+    },
+    {
+      "type":"ranger_dbpatch",
+      "is_enabled":"true",
+      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/ranger_db_patch.log"
+    },
+    {
+      "type":"ranger_usersync",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ranger-env/ranger_usersync_log_dir', '/var/log/ranger/usersync')}}/usersync.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_admin",
+            "ranger_dbpatch"
+          ]
+        }
+      },
+      "log4j_format":"%d [%t] %-5p %C{6} (%F:%L) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_usersync"
+          ]
+        }
+      },
+      "log4j_format":"%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n",
+      "multiline_pattern":"^(%{USER_SYNC_DATE:logtime})",
+      "message_pattern":"(?m)^%{USER_SYNC_DATE:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"dd MMM yyyy HH:mm:ss"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/role_command_order.json b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/role_command_order.json
index df62dfd..073a077 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/role_command_order.json
@@ -1,6 +1,6 @@
 {
   "general_deps" : {
     "_comment" : "dependencies for RANGER",
-    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START", "RANGER_KMS_SERVER-START"]
+    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START"]
   }
 }
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/alerts.json
new file mode 100644
index 0000000..ab473a8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/alerts.json
@@ -0,0 +1,76 @@
+{
+  "RANGER": {
+    "service": [],
+    "RANGER_ADMIN": [
+      {
+        "name": "ranger_admin_process",
+        "label": "Ranger Admin Process",
+        "description": "This host-level alert is triggered if the Ranger Admin Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+              "http": "{{admin-properties/policymgr_external_url}}/login.jsp",
+              "https": "{{admin-properties/policymgr_external_url}}/login.jsp",
+              "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+              "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
+              "https_property": "{{ranger-admin-site/ranger.service.https.attrib.ssl.enabled}}",
+              "https_property_value": "true",
+              "connection_timeout": 5.0
+            },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning": {
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "ranger_admin_password_check",
+        "label": "Ranger Admin password check",
+        "description": "This alert is used to ensure that the Ranger Admin password in Ambari is correct.",
+        "interval": 30,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "RANGER/0.4.0/package/alerts/alert_ranger_admin_passwd_check.py",
+          "parameters": []
+        }
+      }
+    ],
+    "RANGER_USERSYNC": [
+      {
+        "name": "ranger_usersync_process",
+        "label": "Ranger Usersync Process",
+        "description": "This host-level alert is triggered if the Ranger Usersync cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{ranger-ugsync-site/ranger.usersync.port}}",
+          "default_port": 5151,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-log4j.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-log4j.xml
new file mode 100644
index 0000000..fbbfac7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-log4j.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+  <property>
+    <name>ranger_xa_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Ranger Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_xa_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Ranger Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>admin-log4j template</display-name>
+    <description>admin-log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+log4j.rootLogger = warn,xa_log_appender
+
+
+# xa_logger
+log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.xa_log_appender.file=${logdir}/xa_portal.log
+log4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd
+log4j.appender.xa_log_appender.append=true
+log4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout
+log4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n
+log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB
+log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}
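+
+# The {{ranger_xa_log_maxfilesize}} and {{ranger_xa_log_maxbackupindex}} placeholders above
+# are filled from the ranger_xa_log_maxfilesize and ranger_xa_log_maxbackupindex properties
+# defined earlier in this configuration file.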
+
+# xa_log_appender : category and additivity
+log4j.category.org.springframework=warn,xa_log_appender
+log4j.additivity.org.springframework=false
+
+log4j.category.org.apache.ranger=info,xa_log_appender
+log4j.additivity.org.apache.ranger=false
+
+log4j.category.xa=info,xa_log_appender
+log4j.additivity.xa=false
+
+# perf_logger
+log4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log
+log4j.appender.perf_appender.datePattern='.'yyyy-MM-dd
+log4j.appender.perf_appender.append=true
+log4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout
+log4j.appender.perf_appender.layout.ConversionPattern=%d [%t] %m%n
+
+
+# sql_appender
+log4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log
+log4j.appender.sql_appender.datePattern='.'yyyy-MM-dd
+log4j.appender.sql_appender.append=true
+log4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout
+log4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n
+
+# sql_appender : category and additivity
+log4j.category.org.hibernate.SQL=warn,sql_appender
+log4j.additivity.org.hibernate.SQL=false
+
+log4j.category.jdbc.sqlonly=fatal,sql_appender
+log4j.additivity.jdbc.sqlonly=false
+
+log4j.category.jdbc.sqltiming=warn,sql_appender
+log4j.additivity.jdbc.sqltiming=false
+
+log4j.category.jdbc.audit=fatal,sql_appender
+log4j.additivity.jdbc.audit=false
+
+log4j.category.jdbc.resultset=fatal,sql_appender
+log4j.additivity.jdbc.resultset=false
+
+log4j.category.jdbc.connection=fatal,sql_appender
+log4j.additivity.jdbc.connection=false
+        </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-properties.xml
new file mode 100644
index 0000000..1d73087
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/admin-properties.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+
+  <property>
+    <name>SQL_CONNECTOR_JAR</name>
+    <value>{{driver_curl_target}}</value>
+    <display-name>Location of Sql Connector Jar</display-name>
+    <description>Location of DB client library (please check the location of the jar file)</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false" update="false"/>
+  </property>
+  <property>
+    <name>db_root_user</name>
+    <value>root</value>
+    <display-name>Database Administrator (DBA) username</display-name>
+    <description>Database admin user. This user should have DBA permission to create the Ranger Database and Ranger Database User</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>db_root_password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <display-name>Database Administrator (DBA) password</display-name>
+    <description>Database password for the database admin username</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_host</name>
+    <value/>
+    <display-name>Ranger DB host</display-name>
+    <description>Database host</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_name</name>
+    <value>ranger</value>
+    <display-name>Ranger DB name</display-name>
+    <description>Database name</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_user</name>
+    <value>rangeradmin</value>
+    <display-name>Ranger DB username</display-name>
+    <description>Database username used for the Ranger schema</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>db_password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger DB password</display-name>
+    <description>Database password for the Ranger schema</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>DB_FLAVOR</name>
+    <value>MYSQL</value>
+    <display-name>DB FLAVOR</display-name>
+    <description>The database type to be used (mysql/oracle/postgres/mssql/sqlanywhere)</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>MYSQL</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>ORACLE</value>
+          <label>ORACLE</label>
+        </entry>
+        <entry>
+          <value>POSTGRES</value>
+          <label>POSTGRES</label>
+        </entry>
+        <entry>
+          <value>MSSQL</value>
+          <label>MSSQL</label>
+        </entry>
+        <entry>
+          <value>SQLA</value>
+          <label>SQL Anywhere</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
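+
+  <!-- Note: DB_FLAVOR is consumed via the depends-on mechanism by SQL_CONNECTOR_JAR above
+       and by ranger.jpa.jdbc.driver / ranger.jpa.jdbc.url in ranger-admin-site, so changing
+       the flavor lets Ambari recompute those dependent values. -->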
+  <property>
+    <name>policymgr_external_url</name>
+    <value/>
+    <display-name>External URL</display-name>
+    <description>Policy Manager external URL, e.g. http://RANGER_HOST:6080</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.service.http.enabled</name>
+      </property>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.service.http.port</name>
+      </property>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.service.https.port</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
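+
+  <!-- policymgr_external_url declares depends-on entries for the HTTP/HTTPS settings in
+       ranger-admin-site, so the external URL can be re-derived when the protocol or ports change. -->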
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/atlas-tagsync-ssl.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/atlas-tagsync-ssl.xml
new file mode 100644
index 0000000..d43c010
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/atlas-tagsync-ssl.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/etc/security/serverKeys/atlas-tagsync-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/etc/security/serverKeys/atlas-tagsync-mytruststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{atlas_tagsync_credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{atlas_tagsync_credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-admin-site.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-admin-site.xml
new file mode 100644
index 0000000..a9153f8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-admin-site.xml
@@ -0,0 +1,785 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>ranger.service.host</name>
+    <value>{{ranger_host}}</value>
+    <description>Host where the Ranger service is to be installed</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.http.enabled</name>
+    <value>true</value>
+    <display-name>HTTP enabled</display-name>
+    <description>Enable HTTP</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.http.port</name>
+    <value>6080</value>
+    <description>HTTP port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.port</name>
+    <value>6182</value>
+    <description>HTTPS port (if SSL is enabled)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.ssl.enabled</name>
+    <value>false</value>
+    <description>true/false, set to true if using SSL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.clientAuth</name>
+    <value>want</value>
+    <description>Needs to be set to 'want' for two-way SSL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.keyalias</name>
+    <value>rangeradmin</value>
+    <description>Alias for Ranger Admin key in keystore</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.pass</name>
+    <value>xasecure</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.https.attrib.keystore.file</name>
+    <value>/etc/ranger/admin/conf/ranger-admin-keystore.jks</value>
+    <description>Ranger admin keystore (specify full path)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.externalurl</name>
+    <value>{{ranger_external_url}}</value>
+    <display-name>External URL</display-name>
+    <description>URL to be used by clients to access ranger admin</description>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.jpa.jdbc.driver</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <display-name>Driver class name for a JDBC Ranger database</display-name>
+    <description>JDBC driver class name. Example: For MySQL / MariaDB: com.mysql.jdbc.Driver, For Oracle: oracle.jdbc.OracleDriver</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.jpa.jdbc.url</name>
+    <value>jdbc:mysql://localhost</value>
+    <display-name>JDBC connect string for a Ranger database</display-name>
+    <description>JDBC connect string</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+      <property>
+        <type>admin-properties</type>
+        <name>db_host</name>
+      </property>
+      <property>
+        <type>admin-properties</type>
+        <name>db_name</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
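+
+  <!-- The JDBC driver class and connect string above are derived from the DB_FLAVOR, db_host
+       and db_name values in admin-properties, per their depends-on blocks. -->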
+  <property>
+    <name>ranger.jpa.jdbc.user</name>
+    <value>{{ranger_db_user}}</value>
+    <description>JDBC user</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.jpa.jdbc.password</name>
+    <value>_</value>
+    <property-type>PASSWORD</property-type>
+    <description>JDBC password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.jpa.jdbc.credential.alias</name>
+    <value>rangeradmin</value>
+    <description>Alias name for storing JDBC password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.credential.provider.path</name>
+    <value>/etc/ranger/admin/rangeradmin.jceks</value>
+    <description>File for credential store, provide full file path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.audit.source.type</name>
+    <value>solr</value>
+    <description>db or solr, based on the audit destination used</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.audit.solr.urls</name>
+    <value/>
+    <description>Solr URL for audit. Example: http://solr_host:6083/solr/ranger_audits</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.authentication.method</name>
+    <value>UNIX</value>
+    <display-name>Authentication method</display-name>
+    <description>Ranger admin Authentication - UNIX/PAM/LDAP/AD/NONE</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-ugsync-site</type>
+        <name>ranger.usersync.source.impl.class</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.url</name>
+    <display-name>LDAP URL</display-name>
+    <value>{{ranger_ug_ldap_url}}</value>
+    <description>LDAP Server URL, only used if Authentication method is LDAP</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.user.dnpattern</name>
+    <value>uid={0},ou=users,dc=xasecure,dc=net</value>
+    <description>LDAP user DN, only used if Authentication method is LDAP</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.group.searchbase</name>
+    <display-name>Group Search Base</display-name>
+    <value>{{ranger_ug_ldap_group_searchbase}}</value>
+    <description>LDAP group searchbase, only used if Authentication method is LDAP</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.group.searchfilter</name>
+    <display-name>Group Search Filter</display-name>
+    <value>{{ranger_ug_ldap_group_searchfilter}}</value>
+    <description>LDAP group search filter, only used if Authentication method is LDAP</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.ldap.group.roleattribute</name>
+    <value>cn</value>
+    <description>LDAP group role attribute, only used if Authentication method is LDAP</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.base.dn</name>
+    <value>dc=example,dc=com</value>
+    <description>The Distinguished Name (DN) of the starting point for directory server searches.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.bind.dn</name>
+    <display-name>Bind User</display-name>
+    <value>{{ranger_ug_ldap_bind_dn}}</value>
+    <description>Full distinguished name (DN), including common name (CN), of an LDAP user account that has privileges to search for users. </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.bind.password</name>
+    <display-name>Bind User Password</display-name>
+    <value>{{ranger_usersync_ldap_ldapbindpassword}}</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the account that can search for users</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.referral</name>
+    <value>ignore</value>
+    <description>Set to follow if multiple LDAP servers are configured to return continuation references for results. Set to ignore (default) if no referrals should be followed. Possible values are follow|throw|ignore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.domain</name>
+    <display-name>Domain Name (Only for AD)</display-name>
+    <value/>
+    <description>AD domain, only used if Authentication method is AD</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.url</name>
+    <value>{{ranger_ug_ldap_url}}</value>
+    <description>AD URL, only used if Authentication method is AD</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.base.dn</name>
+    <value>dc=example,dc=com</value>
+    <description>The Distinguished Name (DN) of the starting point for directory server searches.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.bind.dn</name>
+    <value>{{ranger_ug_ldap_bind_dn}}</value>
+    <description>Full distinguished name (DN), including common name (CN), of an LDAP user account that has privileges to search for users.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.bind.password</name>
+    <value>{{ranger_usersync_ldap_ldapbindpassword}}</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the account that can search for users</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.ldap.ad.referral</name>
+    <value>ignore</value>
+    <description>Set to follow if multiple LDAP servers are configured to return continuation references for results. Set to ignore (default) if no referrals should be followed. Possible values are follow|throw|ignore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
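+
+  <!-- Per the descriptions above, the ranger.ldap.* properties apply only when the
+       authentication method is LDAP, and the ranger.ldap.ad.* properties only when it is AD. -->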
+
+  <property>
+    <name>ranger.unixauth.remote.login.enabled</name>
+    <value>true</value>
+    <display-name>Allow remote login</display-name>
+    <description>Enable remote login; only used if the authentication method is UNIX</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.unixauth.service.hostname</name>
+    <value>{{ugsync_host}}</value>
+    <description>Host where unix authentication service is running - only used if Authentication method is UNIX</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.unixauth.service.port</name>
+    <value>5151</value>
+    <description>Port for unix authentication service - only used if Authentication method is UNIX</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.jpa.jdbc.dialect</name>
+    <value>{{jdbc_dialect}}</value>
+    <description>JDBC dialect used for policy DB</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.audit.solr.username</name>
+    <value>ranger_solr</value>
+    <description>Solr username</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.audit.solr.password</name>
+    <value>NONE</value>
+    <property-type>PASSWORD</property-type>
+    <description>Solr password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.sso.providerurl</name>
+    <value/>
+    <display-name>SSO provider url</display-name>
+    <description>Example: https://KNOX_HOST:KNOX_PORT/gateway/TOPOLOGY_NAME/knoxsso/api/v1/websso</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>gateway-site</type>
+        <name>gateway.port</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.sso.publicKey</name>
+    <value/>
+    <display-name>SSO public key</display-name>
+    <description>Public key for SSO cookie verification</description>
+    <value-attributes>
+      <type>multiLine</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.sso.enabled</name>
+    <value>false</value>
+    <display-name>Enable Ranger SSO</display-name>
+    <description/>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.sso.browser.useragent</name>
+    <value>Mozilla,chrome</value>
+    <display-name>SSO browser useragent</display-name>
+    <description>Comma-separated list of browser user agents</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.binddn.credential.alias</name>
+    <value>ranger.ldap.bind.password</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ldap.ad.binddn.credential.alias</name>
+    <value>ranger.ldap.ad.bind.password</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.admin.kerberos.token.valid.seconds</name>
+    <value>30</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.admin.kerberos.cookie.domain</name>
+    <value>{{ranger_host}}</value>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.admin.kerberos.cookie.path</name>
+    <value>/</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.spnego.kerberos.principal</name>
+    <value>*</value>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.spnego.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.admin.kerberos.principal</name>
+    <value/>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.admin.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.lookup.kerberos.principal</name>
+    <value/>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.lookup.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
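+
+  <!-- The Kerberos principal/keytab properties above are left empty here (empty-value-valid);
+       they are expected to be populated when the cluster is kerberized. -->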
+  <property>
+    <name>ranger.truststore.file</name>
+    <value>/etc/ranger/admin/conf/ranger-admin-keystore.jks</value>
+    <display-name>ranger.truststore.file</display-name>
+    <description>Ranger trust-store file-path</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <display-name>ranger.truststore.password</display-name>
+    <description>Ranger trust-store password</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.audit.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>infra-solr-env</type>
+        <name>infra_solr_znode</name>
+      </property>
+      <property>
+        <type>ranger-env</type>
+        <name>is_solrCloud_enabled</name>
+      </property>
+      <property>
+        <type>ranger-env</type>
+        <name>is_external_solrCloud_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.ldap.ad.user.searchfilter</name>
+    <value>(sAMAccountName={0})</value>
+    <description>Search filter used for Bind Authentication</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.ldap.user.searchfilter</name>
+    <display-name>User Search Filter</display-name>
+    <value>(uid={0})</value>
+    <description>Search filter used for Bind Authentication</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.kms.service.user.hdfs</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>hdfs_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.kms.service.user.hive</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.hdfs.serviceuser</name>
+    <value>hdfs</value>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>hdfs_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.hive.serviceuser</name>
+    <value>hive</value>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.hbase.serviceuser</name>
+    <value>hbase</value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>hbase_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.yarn.serviceuser</name>
+    <value>yarn</value>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.knox.serviceuser</name>
+    <value>knox</value>
+    <depends-on>
+      <property>
+        <type>knox-env</type>
+        <name>knox_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.storm.serviceuser</name>
+    <value>storm</value>
+    <depends-on>
+      <property>
+        <type>storm-env</type>
+        <name>storm_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.kafka.serviceuser</name>
+    <value>kafka</value>
+    <depends-on>
+      <property>
+        <type>kafka-env</type>
+        <name>kafka_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.atlas.serviceuser</name>
+    <value>atlas</value>
+    <depends-on>
+      <property>
+        <type>atlas-env</type>
+        <name>metadata_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.plugins.kms.serviceuser</name>
+    <value>kms</value>
+    <depends-on>
+      <property>
+        <type>kms-env</type>
+        <name>kms_user</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
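+
+  <!-- Each ranger.plugins.<service>.serviceuser above tracks the corresponding service's
+       configured user (hdfs_user, hive_user, hbase_user, etc.) through its depends-on block. -->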
+
+  <property>
+    <name>ranger.is.solr.kerberised</name>
+    <value>{{ranger_is_solr_kerberised}}</value>
+    <value-attributes>
+      <visible>false</visible>
+    </value-attributes>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>ranger.truststore.alias</name>
+    <value>trustStoreAlias</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.credential.alias</name>
+    <value>keyStoreCredentialAlias</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-env.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-env.xml
new file mode 100644
index 0000000..3e25470
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-env.xml
@@ -0,0 +1,513 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true" supports_adding_forbidden="true">
+  <property>
+    <name>ranger_user</name>
+    <value>ranger</value>
+    <property-type>USER</property-type>
+    <display-name>Ranger User</display-name>
+    <description>Ranger username</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_group</name>
+    <value>ranger</value>
+    <property-type>GROUP</property-type>
+    <display-name>Ranger Group</display-name>
+    <description>Ranger group</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_admin_log_dir</name>
+    <value>/var/log/ranger/admin</value>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_usersync_log_dir</name>
+    <value>/var/log/ranger/usersync</value>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_admin_username</name>
+    <value>amb_ranger_admin</value>
+    <property-type>TEXT</property-type>
+    <display-name>Ranger Admin username for Ambari</display-name>
+    <description>This is the Ambari user created for creating repositories and policies in Ranger Admin for each plugin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_admin_password</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger Admin user's password for Ambari</display-name>
+    <description>This is the password of the Ambari user created for creating repositories and policies in Ranger Admin for each plugin</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>admin_username</name>
+    <value>admin</value>
+    <description>This is the username of the default admin user that is used for creating the Ambari user in Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>admin_password</name>
+    <value>admin</value>
+    <property-type>PASSWORD</property-type>
+    <description>This is the password of the default admin user that is used for creating the Ambari user in Ranger Admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
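+
+  <!-- Per the descriptions above, admin_username/admin_password is the default Ranger admin
+       login that Ambari uses to create the ranger_admin_username account. -->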
+
+
+  <property>
+    <name>ranger_pid_dir</name>
+    <value>/var/run/ranger</value>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hdfs-plugin-enabled</name>
+    <value>No</value>
+    <display-name>HDFS Ranger Plugin</display-name>
+    <description>Enable HDFS Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hive-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Hive Ranger Plugin</display-name>
+    <description>Enable Hive Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hbase-plugin-enabled</name>
+    <value>No</value>
+    <display-name>HBase Ranger Plugin</display-name>
+    <description>Enable HBase Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-storm-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Storm Ranger Plugin</display-name>
+    <description>Enable Storm Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-knox-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Knox Ranger Plugin</display-name>
+    <description>Enable Knox Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>xml_configurations_supported</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>create_db_dbuser</name>
+    <value>true</value>
+    <display-name>Setup Database and Database User</display-name>
+    <description>If set to Yes, Ambari will create and set up the Ranger database and database user. This requires specifying the database admin user and password</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger_privelege_user_jdbc_url</name>
+    <display-name>JDBC connect string for root user</display-name>
+    <description>JDBC connect string, auto-populated based on other values. This is to be used by the root user</description>
+    <value>jdbc:mysql://localhost</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+      <property>
+        <type>admin-properties</type>
+        <name>db_host</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-yarn-plugin-enabled</name>
+    <value>No</value>
+    <display-name>YARN Ranger Plugin</display-name>
+    <description>Enable YARN Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-kafka-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Kafka Ranger Plugin</display-name>
+    <description>Enable Kafka Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>true</value>
+    <display-name>Audit to Solr</display-name>
+    <description>Enable audit to Solr for all Ranger-supported services. This property is overridable at the service level</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>is_solrCloud_enabled</name>
+    <display-name>SolrCloud</display-name>
+    <description>SolrCloud uses ZooKeeper for distributed search and indexing</description>
+    <value>false</value>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Enable audit to HDFS for all Ranger-supported services. This property is overridable at the service level</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://localhost:8020</value>
+    <display-name>Destination HDFS Directory</display-name>
+    <description>HDFS folder to write audits to; make sure all service users have the required permissions. This property is overridable at the service level</description>
+    <depends-on>
+      <property>
+        <type>core-site</type>
+        <name>fs.defaultFS</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_solr_config_set</name>
+    <value>ranger_audits</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_solr_collection_name</name>
+    <value>ranger_audits</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_solr_shards</name>
+    <value>1</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_solr_replication_factor</name>
+    <value>1</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
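+
+  <!-- The ranger_solr_* properties above (config set, collection name, shards, replication
+       factor) describe the Solr collection used for Ranger audits. -->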
+  <property>
+    <name>ranger-atlas-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Atlas Ranger Plugin</display-name>
+    <description>Enable Atlas Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>is_external_solrCloud_enabled</name>
+    <display-name>External SolrCloud</display-name>
+    <value>false</value>
+    <description>Use an externally managed SolrCloud?</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>is_external_solrCloud_kerberos</name>
+    <display-name>External SolrCloud kerberos</display-name>
+    <value>false</value>
+    <description>Is the externally managed SolrCloud kerberized?</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger-nifi-plugin-enabled</name>
+    <value>No</value>
+    <display-name>NIFI Ranger Plugin</display-name>
+    <description>Enable NIFI Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-site.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-site.xml
new file mode 100644
index 0000000..c70e222
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-site.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+
+
+
+
+
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-solr-configuration.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-solr-configuration.xml
new file mode 100644
index 0000000..550ce0d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-solr-configuration.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger_audit_max_retention_days</name>
+    <display-name>Max Retention Days</display-name>
+    <description>Days to retain audit logs in Solr</description>
+    <value>90</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_audit_logs_merge_factor</name>
+    <display-name>Merge Factor</display-name>
+    <description>
+      The mergeFactor value tells Lucene how many segments of equal size to build before merging them into a
+      single segment. A high merge factor (e.g. 25) improves indexing speed but slows down searching; a low
+      value (e.g. 5) improves searching but slows down indexing.
+    </description>
+    <value>5</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>solr-config template</display-name>
+    <description>The Jinja template for the solrconfig.xml file used for Ranger audit logs</description>
+    <value/>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>ranger-solrconfig.xml.j2</property-file-name>
+      <property-file-type>xml</property-file-type>
+    </value-attributes>
+    <on-ambari-upgrade add="false" />
+  </property>
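+  <!--
+    Sketch (assumption, not part of this patch): the VALUE_FROM_PROPERTY_FILE type tells
+    Ambari to load this property's value from the ranger-solrconfig.xml.j2 file bundled
+    with the service definition (assumed lookup location). Inside that template the two
+    properties above would typically be consumed as Jinja substitutions, e.g.
+      <mergeFactor>{{ranger_audit_logs_merge_factor}}</mergeFactor>
+    with ranger_audit_max_retention_days driving a document expiration policy for audits.
+  -->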
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-policymgr-ssl.xml
new file mode 100644
index 0000000..a4c9441
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-policymgr-ssl.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/etc/security/serverKeys/ranger-tagsync-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/etc/security/serverKeys/ranger-tagsync-mytruststore.jks</value>
+    <description>Java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{ranger_tagsync_credential_file}}</value>
+    <description>Java keystore credential file</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{ranger_tagsync_credential_file}}</value>
+    <description>Java truststore credential file</description>
+    <on-ambari-upgrade add="false" />
+  </property>
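+  <!--
+    The jceks://file{{ranger_tagsync_credential_file}} URIs point at a Hadoop
+    CredentialProvider store rendered by the agent. As an illustrative sketch
+    (assumed alias and path, not part of this patch), such a store can be populated with:
+      hadoop credential create sslKeyStore -value myKeyFilePassword \
+        -provider jceks://file/etc/ranger/tagsync/conf/rangertagsync.jceks
+  -->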
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-site.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-site.xml
new file mode 100644
index 0000000..5e60c06
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-tagsync-site.xml
@@ -0,0 +1,206 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>ranger.tagsync.logdir</name>
+    <value>/var/log/ranger/tagsync</value>
+    <description>Ranger tagsync log directory</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.dest.ranger.endpoint</name>
+    <value>{{ranger_external_url}}</value>
+    <description>Ranger TagAdmin REST URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.atlas</name>
+    <display-name>Enable Atlas Tag Source</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.bind.address</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.atlasrest</name>
+    <display-name>Enable AtlasRest Tag Source</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.file</name>
+    <display-name>Enable File Tag Source</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.file.check.interval.millis</name>
+    <display-name>File Source: File update polling interval</display-name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.atlasrest.download.interval.millis</name>
+    <display-name>AtlasREST Source: Atlas source download interval</display-name>
+    <value>60000</value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.tagsync.source.file.filename</name>
+    <display-name>File Source: Filename</display-name>
+    <value/>
+    <description>File Source Filename</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.tagsync.source.atlasrest.endpoint</name>
+    <display-name>AtlasREST Source: Atlas endpoint</display-name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+    </depends-on>
+  </property>
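+  <!--
+    Illustrative values (assumption, host is site-specific): with atlas.enableTLS=false
+    this would typically be http://atlas-host:21000, and with TLS enabled
+    https://atlas-host:21443, matching the atlas.server.http.port and
+    atlas.server.https.port dependencies above.
+  -->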
+  <property>
+    <name>ranger.tagsync.kerberos.principal</name>
+    <value/>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.dest.ranger.username</name>
+    <value>rangertagsync</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.tagsync.source.atlasrest.username</name>
+    <value>admin</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.tagsync.atlas.default.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Capture cluster name</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.tagsync.keystore.filename</name>
+    <value>/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks</value>
+    <description>Keystore file</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.atlasrest.keystore.filename</name>
+    <value>/usr/hdp/current/ranger-tagsync/conf/atlasuser.jceks</value>
+    <description>Tagsync atlasrest keystore file</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.dest.ranger.ssl.config.filename</name>
+    <value>{{stack_root}}/current/ranger-tagsync/conf/ranger-policymgr-ssl.xml</value>
+    <description>Keystore and truststore information used for tagsync, required if tagsync -&gt; ranger admin communication is SSL enabled</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.tagsync.source.atlasrest.ssl.config.filename</name>
+    <value>{{stack_root}}/current/ranger-tagsync/conf/atlas-tagsync-ssl.xml</value>
+    <description>Keystore and truststore information used for tagsync, required if tagsync to atlas communication is SSL enabled</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-ugsync-site.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-ugsync-site.xml
new file mode 100644
index 0000000..2c62851
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/ranger-ugsync-site.xml
@@ -0,0 +1,574 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>ranger.usersync.port</name>
+    <value>5151</value>
+    <description>Port for the UNIX authentication service that runs within usersync</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ssl</name>
+    <value>true</value>
+    <description>SSL enabled? (ranger admin -&gt; usersync communication)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.keystore.password</name>
+    <value>UnIx529p</value>
+    <property-type>PASSWORD</property-type>
+    <description>Keystore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>Truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.passwordvalidator.path</name>
+    <value>./native/credValidator.uexe</value>
+    <description>Native program for password validation</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.enabled</name>
+    <display-name>Enable User Sync</display-name>
+    <value>true</value>
+    <description>Should users and groups be synchronized to the Ranger database? Required to set up Ranger policies</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.sink.impl.class</name>
+    <value>org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder</value>
+    <description>Class to be used as the sink (to sync users into Ranger Admin)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymanager.baseURL</name>
+    <value>{{ranger_external_url}}</value>
+    <description>URL used by clients to access Ranger Admin; use the FQDN</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymanager.maxrecordsperapicall</name>
+    <value>1000</value>
+    <description>Maximum number of records returned per API call</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymanager.mockrun</name>
+    <value>false</value>
+    <description>Whether user sync performs a mock (dry) run</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.unix.minUserId</name>
+    <display-name>Minimum User ID</display-name>
+    <value>500</value>
+    <description>Only sync users above this user id (applicable for UNIX)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.unix.group.file</name>
+    <display-name>Group File</display-name>
+    <value>/etc/group</value>
+    <description>Location of the groups file on the Linux server</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.unix.password.file</name>
+    <display-name>Password File</display-name>
+    <value>/etc/passwd</value>
+    <description>Location of the password file on the Linux server</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.sleeptimeinmillisbetweensynccycle</name>
+    <value>60000</value>
+    <description>Sleep interval in milliseconds between sync cycles; if &lt; 60000, defaults to 1 min</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.source.impl.class</name>
+    <value>org.apache.ranger.unixusersync.process.UnixUserGroupBuilder</value>
+    <display-name>Sync Source</display-name>
+    <description>For Ldap: org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder, For Unix: org.apache.ranger.unixusersync.process.UnixUserGroupBuilder, org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder</description>
+    <value-attributes>
+      <type>value-list</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>org.apache.ranger.unixusersync.process.UnixUserGroupBuilder</value>
+          <label>UNIX</label>
+        </entry>
+        <entry>
+          <value>org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder</value>
+          <label>FILE</label>
+        </entry>
+        <entry>
+          <value>org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder</value>
+          <label>LDAP/AD</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.filesource.file</name>
+    <display-name>File Name</display-name>
+    <value>/tmp/usergroup.txt</value>
+    <description>Path to the file with the users and groups information. Example: /tmp/usergroup.json or /tmp/usergroup.csv or /tmp/usergroup.txt</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.filesource.text.delimiter</name>
+    <display-name>Delimiter</display-name>
+    <value>,</value>
+    <description>Delimiter used in the file when file-based user sync is used</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.url</name>
+    <display-name>LDAP/AD URL</display-name>
+    <value/>
+    <description>LDAP server URL. Example: ldap://localhost:389 or ldaps://localhost:636</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.binddn</name>
+    <display-name>Bind User</display-name>
+    <value/>
+    <description>Full distinguished name (DN), including common name (CN), of an LDAP user account that has privileges to search for users. This account is used only for searching and can be a read-only LDAP user. Example: cn=admin,dc=example,dc=com</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.ldapbindpassword</name>
+    <display-name>Bind User Password</display-name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description>Password for the LDAP bind user used for searching users.</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.bindalias</name>
+    <value>testldapalias</value>
+    <description>Set as ranger.usersync.ldap.bindalias (string as is)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.ldap.searchBase</name>
+    <value>dc=hadoop,dc=apache,dc=org</value>
+    <description>"# search base for users and groups
+# sample value would be dc=hadoop,dc=apache,dc=org
+# From Ranger Release 0.6.0 multiple Ous can be configured with ; (semicolon) separated"</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.searchbase</name>
+    <display-name>User Search Base</display-name>
+    <value/>
+    <description>"# search base for users
+# sample value would be ou=users,dc=hadoop,dc=apache,dc=org
+# overrides value specified in ranger.usersync.ldap.searchBase
+# From Ranger Release 0.6.0 multiple Ous can be configured with ; (semicolon) separated eg: cn=users,dc=example,dc=com;ou=example1,ou=example2"</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.searchscope</name>
+    <display-name>User Search Scope</display-name>
+    <value>sub</value>
+    <description>"# search scope for the users, only base, one and sub are supported values
+# please customize the value to suit your deployment
+# default value: sub"</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.objectclass</name>
+    <display-name>User Object Class</display-name>
+    <value>person</value>
+    <description>LDAP User Object Class. Example: person or user</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.searchfilter</name>
+    <display-name>User Search Filter</display-name>
+    <value/>
+    <description>"optional additional filter constraining the users selected for syncing
+# a sample value would be (dept=eng)
+# please customize the value to suit your deployment
+# default value is empty"</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.nameattribute</name>
+    <display-name>Username Attribute</display-name>
+    <value/>
+    <description>LDAP user name attribute. Example: sAMAccountName in AD, uid or cn in OpenLDAP</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
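+  <!--
+    Illustrative Active Directory setup (assumption, values are site-specific):
+      ranger.usersync.ldap.url                = ldap://ad.example.com:389
+      ranger.usersync.ldap.binddn             = cn=ldapbind,ou=svc,dc=example,dc=com
+      ranger.usersync.ldap.user.searchbase    = ou=users,dc=example,dc=com
+      ranger.usersync.ldap.user.objectclass   = user
+      ranger.usersync.ldap.user.nameattribute = sAMAccountName
+  -->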
+  <property>
+    <name>ranger.usersync.ldap.referral</name>
+    <value>ignore</value>
+    <description>Set to follow if multiple LDAP servers are configured to return continuation references for results. Set to ignore (default) if no referrals should be followed</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.user.groupnameattribute</name>
+    <display-name>User Group Name Attribute</display-name>
+    <value>memberof, ismemberof</value>
+    <description>LDAP user group name attribute. Generally the same as the username attribute. Example: sAMAccountName in AD, uid or cn in OpenLDAP</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.username.caseconversion</name>
+    <value>none</value>
+    <description>User name case conversion</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.groupname.caseconversion</name>
+    <value>none</value>
+    <description>Group name case conversion</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.logdir</name>
+    <value>{{usersync_log_dir}}</value>
+    <description>User sync log directory</description>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.group.usermapsyncenabled</name>
+    <value>true</value>
+    <display-name>Group User Map Sync</display-name>
+    <description>Whether to sync user-group mappings for users</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.searchbase</name>
+    <display-name>Group Search Base</display-name>
+    <value/>
+    <description>"# search base for groups
+# sample value would be ou=groups,dc=hadoop,dc=apache,dc=org
+# overrides value specified in ranger.usersync.ldap.searchBase,  ranger.usersync.ldap.user.searchbase
+# if a value is not specified, takes the value of  ranger.usersync.ldap.searchBase
+# if  ranger.usersync.ldap.searchBase is also not specified, takes the value of ranger.usersync.ldap.user.searchbase"
+# From Ranger Release 0.6.0 multiple Ous can be configured with ; (semicolon) separated eg: ou=groups,DC=example,DC=com;ou=group1,ou=group2"
+</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.searchscope</name>
+    <value/>
+    <description>"# search scope for the groups, only base, one and sub are supported values
+# please customize the value to suit your deployment
+# default value: sub"</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.objectclass</name>
+    <display-name>Group Object Class</display-name>
+    <value/>
+    <description>LDAP Group object class. Example: group</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.searchfilter</name>
+    <value/>
+    <display-name>Group Search Filter</display-name>
+    <description>"# optional additional filter constraining the groups selected for syncing
+# a sample value would be (dept=eng)
+# please customize the value to suit your deployment
+# default value is empty"</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.nameattribute</name>
+    <display-name>Group Name Attribute</display-name>
+    <value/>
+    <description>LDAP group name attribute. Example: cn</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.group.memberattributename</name>
+    <display-name>Group Member Attribute</display-name>
+    <value/>
+    <description>LDAP group member attribute name. Example: member</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.pagedresultsenabled</name>
+    <value>true</value>
+    <description>Whether results returned by the LDAP server can be paged</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.pagedresultssize</name>
+    <value>500</value>
+    <description>Page size</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
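+  <!--
+    Paging note: directory servers commonly cap a single search result set (Active
+    Directory defaults to 1000 entries), so paged retrieval with a page size of 500
+    keeps large user/group syncs under that limit.
+  -->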
+
+  <property>
+    <name>ranger.usersync.kerberos.principal</name>
+    <value/>
+    <description/>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymgr.username</name>
+    <value>rangerusersync</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymgr.alias</name>
+    <value>ranger.usersync.policymgr.password</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.group.search.first.enabled</name>
+    <display-name>Enable Group Search First</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.user.searchenabled</name>
+    <display-name>Enable User Search</display-name>
+    <value>false</value>
+    <description/>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.deltasync</name>
+    <display-name>Incremental Sync</display-name>
+    <value>true</value>
+    <description>Enable Incremental Sync</description>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.usersync.group.searchenabled</name>
+    <display-name>Enable Group Sync</display-name>
+    <value>false</value>
+    <description>"# do we want to do ldapsearch to find groups instead of relying on user entry attributes
+    # valid values: true, false
+    # any value other than true would be treated as false
+    # default value: false"</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-ugsync-site</type>
+        <name>ranger.usersync.ldap.deltasync</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
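+  <!--
+    The depends-on block above lets Ambari re-recommend this value when Incremental
+    Sync (ranger.usersync.ldap.deltasync) is toggled; this is the stack advisor's
+    behavior as assumed here, not something enforced by this file itself.
+  -->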
+  <property>
+    <name>ranger.usersync.keystore.file</name>
+    <value>/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks</value>
+    <description>Keystore file used for usersync</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.truststore.file</name>
+    <value>/usr/hdp/current/ranger-usersync/conf/mytruststore.jks</value>
+    <description>Truststore used for usersync, required if usersync -&gt; ranger admin communication is SSL enabled</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.ldap.bindkeystore</name>
+    <value/>
+    <description>Set to the same value as the ranger.usersync.keystore.file property, i.e. the default value /usr/hdp/current/ranger-usersync/conf/ugsync.jceks</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.credstore.filename</name>
+    <value>/usr/hdp/current/ranger-usersync/conf/ugsync.jceks</value>
+    <description>Credential store file name for user sync, specify full path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.usersync.policymgr.keystore</name>
+    <value>/usr/hdp/current/ranger-usersync/conf/ugsync.jceks</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-application-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-application-properties.xml
new file mode 100644
index 0000000..f616324
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-application-properties.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>atlas.kafka.entities.group.id</name>
+    <display-name>Atlas Source: Kafka consumer group</display-name>
+    <value>ranger_entities_consumer</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.bootstrap.servers</name>
+    <display-name>Atlas Source: Kafka endpoint</display-name>
+    <value>localhost:6667</value>
+    <description/>
+    <depends-on>
+      <property>
+        <type>kafka-broker</type>
+        <name>port</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.kafka.zookeeper.connect</name>
+    <display-name>Atlas Source: Zookeeper endpoint</display-name>
+    <value>localhost:2181</value>
+    <description/>
+    <depends-on>
+      <property>
+        <type>zoo.cfg</type>
+        <name>clientPort</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-log4j.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-log4j.xml
new file mode 100644
index 0000000..8ec85a0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/tagsync-log4j.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+  <property>
+    <name>ranger_tagsync_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Ranger tagsync Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_tagsync_log_number_of_backup_files</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Ranger tagsync Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>tagsync-log4j template</display-name>
+    <description>tagsync-log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+log4j.rootLogger = info,logFile
+
+# logFile
+log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.logFile.file=${logdir}/tagsync.log
+log4j.appender.logFile.datePattern='.'yyyy-MM-dd
+log4j.appender.logFile.layout=org.apache.log4j.PatternLayout
+log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB
+log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}
+log4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n
+
+# console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.Target=System.out
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n
+        </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
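+  <!--
+    The {{...}} tokens inside the template above are rendered by Ambari (not log4j)
+    before tagsync-log4j.properties is written to disk, so the two sizing properties
+    defined earlier in this file end up as literal values in the deployed file.
+  -->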
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-log4j.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-log4j.xml
new file mode 100644
index 0000000..6d91b6e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-log4j.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+  <property>
+    <name>ranger_usersync_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Ranger usersync Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_usersync_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Ranger usersync Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>usersync-log4j template</display-name>
+    <description>usersync-log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+log4j.rootLogger = info,logFile
+
+# logFile
+log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.logFile.file=${logdir}/usersync.log
+log4j.appender.logFile.datePattern='.'yyyy-MM-dd
+log4j.appender.logFile.layout=org.apache.log4j.PatternLayout
+log4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n
+log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB
+log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}
+
+# console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.Target=System.out
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n
+        </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-properties.xml
new file mode 100644
index 0000000..15aabe8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/configuration/usersync-properties.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+
+
+
+
+
+
+
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/kerberos.json
new file mode 100644
index 0000000..1fc8acf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/kerberos.json
@@ -0,0 +1,153 @@
+{
+  "services": [
+    {
+      "name": "RANGER",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "ranger-admin-site": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "RANGER_ADMIN",
+          "identities": [
+            {
+              "name": "rangeradmin",
+              "principal": {
+                "value": "rangeradmin/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "ranger-admin-site/ranger.admin.kerberos.principal",
+                "local_username" : "${ranger-env/ranger_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rangeradmin.service.keytab",
+                "owner": {
+                  "name": "${ranger-env/ranger_user}",
+                  "access": "r"
+                },
+                "configuration": "ranger-admin-site/ranger.admin.kerberos.keytab"
+              }
+            },
+            {
+              "name": "rangerlookup",
+              "principal": {
+                "value": "rangerlookup/_HOST@${realm}",
+                "configuration": "ranger-admin-site/ranger.lookup.kerberos.principal",
+                "type" : "service"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rangerlookup.service.keytab",
+                "owner": {
+                  "name": "${ranger-env/ranger_user}",
+                  "access": "r"
+                },
+                "configuration": "ranger-admin-site/ranger.lookup.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "keytab": {
+                "configuration": "ranger-admin-site/ranger.spnego.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/RANGER/RANGER_ADMIN/rangeradmin",
+              "principal": {
+                "configuration": "ranger-admin-site/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-admin-site/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            },
+            {
+              "name": "/AMBARI_INFRA/INFRA_SOLR/infra-solr",
+              "when" : {
+                "contains" : ["services", "AMBARI_INFRA"]
+              }
+            }
+          ]
+        },
+        {
+          "name": "RANGER_USERSYNC",
+          "identities": [
+            {
+              "name": "rangerusersync",
+              "principal": {
+                "value": "rangerusersync/_HOST@${realm}",
+                "type" : "service",
+                "configuration" : "ranger-ugsync-site/ranger.usersync.kerberos.principal",
+                "local_username" : "rangerusersync"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rangerusersync.service.keytab",
+                "owner": {
+                  "name": "${ranger-env/ranger_user}",
+                  "access": "r"
+                },
+                "configuration": "ranger-ugsync-site/ranger.usersync.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RANGER_TAGSYNC",
+          "identities": [
+            {
+              "name": "rangertagsync",
+              "principal": {
+                "value": "rangertagsync/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "ranger-tagsync-site/ranger.tagsync.kerberos.principal",
+                "local_username" : "rangertagsync"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rangertagsync.service.keytab",
+                "owner": {
+                  "name": "${ranger-env/ranger_user}",
+                  "access": "r"
+                },
+                "configuration": "ranger-tagsync-site/ranger.tagsync.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/RANGER/RANGER_TAGSYNC/rangertagsync",
+              "principal": {
+                "configuration": "tagsync-application-properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "tagsync-application-properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "tagsync-application-properties": {
+                "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "atlas.jaas.KafkaClient.loginModuleControlFlag": "required",
+                "atlas.jaas.KafkaClient.option.useKeyTab": "true",
+                "atlas.jaas.KafkaClient.option.storeKey": "true",
+                "atlas.jaas.KafkaClient.option.serviceName": "kafka",
+                "atlas.kafka.sasl.kerberos.service.name": "kafka",
+                "atlas.kafka.security.protocol": "PLAINTEXTSASL"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/metainfo.xml
new file mode 100644
index 0000000..e208800
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/metainfo.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER</name>
+      <displayName>Ranger</displayName>
+      <comment>Comprehensive security for Hadoop</comment>
+      <version>0.7.0.3.0</version>
+      <components>
+
+        <component>
+          <name>RANGER_ADMIN</name>
+          <displayName>Ranger Admin</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>AMBARI_INFRA/INFRA_SOLR_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/ranger_admin.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>ranger_admin</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>ranger_dbpatch</logId>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>RANGER_TAGSYNC</name>
+          <displayName>Ranger Tagsync</displayName>
+          <category>SLAVE</category>
+          <cardinality>0-1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/ranger_tagsync.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>ranger-tagsync-site</config-type>
+            <config-type>tagsync-application-properties</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>RANGER_USERSYNC</name>
+          <displayName>Ranger Usersync</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>RANGER/RANGER_ADMIN</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/ranger_usersync.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>ranger_usersync</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+      </components>
+      <configuration-dependencies>
+        <config-type>admin-properties</config-type>
+        <config-type>ranger-site</config-type>
+        <config-type>usersync-properties</config-type>
+        <config-type>ranger-admin-site</config-type>
+        <config-type>ranger-ugsync-site</config-type>
+        <config-type>admin-log4j</config-type>
+        <config-type>usersync-log4j</config-type>
+        <config-type>ranger-solr-configuration</config-type>
+      </configuration-dependencies>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <themes>
+        <theme>
+          <fileName>theme_version_1.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_2.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_3.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_5.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>ranger_${stack_version}-admin</name>
+            </package>
+            <package>
+              <name>ranger_${stack_version}-usersync</name>
+            </package>
+            <package>
+              <name>ranger_${stack_version}-tagsync</name>
+              <condition>should_install_ranger_tagsync</condition>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+          </packages>
+        </osSpecific>
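+        <!-- deb-based families below use hyphenated package names where the RPM families above use underscores -->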
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>ranger-${stack_version}-admin</name>
+            </package>
+            <package>
+              <name>ranger-${stack_version}-usersync</name>
+            </package>
+            <package>
+              <name>ranger-${stack_version}-tagsync</name>
+              <condition>should_install_ranger_tagsync</condition>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client</name>
+              <condition>should_install_infra_solr_client</condition>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/alerts/alert_ranger_admin_passwd_check.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/alerts/alert_ranger_admin_passwd_check.py
new file mode 100644
index 0000000..8ea8070
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/alerts/alert_ranger_admin_passwd_check.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import base64
+import urllib2
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and has the same function set.
+import logging
+from resource_management.core.environment import Environment
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+logger = logging.getLogger()
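+# {{config-type/property}} tokens; Ambari resolves them into the configurations dict passed to execute()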
+RANGER_ADMIN_URL = '{{admin-properties/policymgr_external_url}}'
+ADMIN_USERNAME = '{{ranger-env/admin_username}}'
+ADMIN_PASSWORD = '{{ranger-env/admin_password}}'
+RANGER_ADMIN_USERNAME = '{{ranger-env/ranger_admin_username}}'
+RANGER_ADMIN_PASSWORD = '{{ranger-env/ranger_admin_password}}'
+SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+
+  :return tuple
+  """
+  return (RANGER_ADMIN_URL, ADMIN_USERNAME, ADMIN_PASSWORD, RANGER_ADMIN_USERNAME, RANGER_ADMIN_PASSWORD, SECURITY_ENABLED)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  ranger_link = None
+  ranger_auth_link = None
+  ranger_get_user = None
+  admin_username = None
+  admin_password = None
+  ranger_admin_username = None
+  ranger_admin_password = None
+  security_enabled = False
+
+  stack_version_formatted = Script.get_stack_version()
+  stack_supports_ranger_kerberos = stack_version_formatted and check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, stack_version_formatted)
+
+  if RANGER_ADMIN_URL in configurations:
+    ranger_link = configurations[RANGER_ADMIN_URL]
+    if ranger_link.endswith('/'):
+      ranger_link = ranger_link[:-1]
+    ranger_auth_link = '{0}/{1}'.format(ranger_link, 'service/public/api/repository/count')
+    ranger_get_user = '{0}/{1}'.format(ranger_link, 'service/xusers/users')
+
+  if ADMIN_USERNAME in configurations:
+    admin_username = configurations[ADMIN_USERNAME]
+
+  if ADMIN_PASSWORD in configurations:
+    admin_password = configurations[ADMIN_PASSWORD]
+
+  if RANGER_ADMIN_USERNAME in configurations:
+    ranger_admin_username = configurations[RANGER_ADMIN_USERNAME]
+
+  if RANGER_ADMIN_PASSWORD in configurations:
+    ranger_admin_password = configurations[RANGER_ADMIN_PASSWORD]
+
+  if SECURITY_ENABLED in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
+
+  label = None
+  result_code = 'OK'
+
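+  # check the Ambari-managed admin login first, then confirm the
+  # ranger_admin_username user is synced to Ranger and can also log in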
+  try:
+    if security_enabled and stack_supports_ranger_kerberos:
+      result_code = 'UNKNOWN'
+      label = 'This alert is skipped for Ranger Admin in a Kerberos environment'
+    else:
+      admin_http_code = check_ranger_login(ranger_auth_link, admin_username, admin_password)
+      if admin_http_code == 200:
+        get_user_code = get_ranger_user(ranger_get_user, admin_username, admin_password, ranger_admin_username)
+        if get_user_code:
+          user_http_code = check_ranger_login(ranger_auth_link, ranger_admin_username, ranger_admin_password)
+          if user_http_code == 200:
+            result_code = 'OK'
+            label = 'Login Successful for users {0} and {1}'.format(admin_username, ranger_admin_username)
+          elif user_http_code == 401:
+            result_code = 'CRITICAL'
+            label = 'User:{0} credentials on Ambari UI are not in sync with Ranger'.format(ranger_admin_username)
+          else:
+            result_code = 'WARNING'
+            label = 'Ranger Admin service is not reachable, please restart the service'
+        else:
+          result_code = 'OK'
+          label = 'Login Successful for user: {0}. User:{1} is not yet synced with Ranger'.format(admin_username, ranger_admin_username)
+      elif admin_http_code == 401:
+        result_code = 'CRITICAL'
+        label = 'User:{0} credentials on Ambari UI are not in sync with Ranger'.format(admin_username)
+      else:
+        result_code = 'WARNING'
+        label = 'Ranger Admin service is not reachable, please restart the service'
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+    logger.exception(label)
+
+  return ((result_code, [label]))
+
+def check_ranger_login(ranger_auth_link, username, password):
+  """
+  :param ranger_auth_link: Ranger login URL
+  :param username: user name credential
+  :param password: password credential
+
+  :return: HTTP response code, or None if the service is unreachable
+  """
+  try:
+    usernamepassword = '{0}:{1}'.format(username, password)
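+    # base64.encodestring appends newline characters that would break the Authorization header, so strip them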
+    base_64_string = base64.encodestring(usernamepassword).replace('\n', '')
+    request = urllib2.Request(ranger_auth_link)
+    request.add_header("Content-Type", "application/json")
+    request.add_header("Accept", "application/json")
+    request.add_header("Authorization", "Basic {0}".format(base_64_string))
+    result = urllib2.urlopen(request, timeout=20)
+    response_code = result.getcode()
+    if response_code == 200:
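+      # parse the body only to validate the response is JSON; the parsed value is unused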
+      response = json.loads(result.read())
+    return response_code
+  except urllib2.HTTPError, e:
+    logger.exception("Error during Ranger service authentication. Http status code - {0}. {1}".format(e.code, e.read()))
+    return e.code
+  except urllib2.URLError, e:
+    logger.exception("Error during Ranger service authentication. {0}".format(e.reason))
+    return None
+  except Exception, e:
+    return 401
+
+def get_ranger_user(ranger_get_user, username, password, user):
+  """
+  :param ranger_get_user: Ranger get-user URL
+  :param username: user name credential
+  :param password: password credential
+  :param user: user to search for
+  :return: True if the user exists, False otherwise
+  """
+  try:
+    url = '{0}?name={1}'.format(ranger_get_user, user)
+    usernamepassword = '{0}:{1}'.format(username, password)
+    base_64_string = base64.encodestring(usernamepassword).replace('\n', '')
+    request = urllib2.Request(url)
+    request.add_header("Content-Type", "application/json")
+    request.add_header("Accept", "application/json")
+    request.add_header("Authorization", "Basic {0}".format(base_64_string))
+    result = urllib2.urlopen(request, timeout=20)
+    response_code = result.getcode()
+    response = json.loads(result.read())
+    if response_code == 200 and len(response['vXUsers']) > 0:
+      for xuser in response['vXUsers']:
+        if xuser['name'] == user:
+          return True
+    else:
+      return False
+  except urllib2.HTTPError, e:
+    logger.exception("Error getting user from Ranger service. Http status code - {0}. {1}".format(e.code, e.read()))
+    return False
+  except urllib2.URLError, e:
+    logger.exception("Error getting user from Ranger service. {0}".format(e.reason))
+    return False
+  except Exception, e:
+    return False
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..094d239
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/params.py
@@ -0,0 +1,448 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'RANGER_ADMIN' : 'ranger-admin',
+  'RANGER_USERSYNC' : 'ranger-usersync',
+  'RANGER_TAGSYNC' : 'ranger-tagsync'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "RANGER_ADMIN")
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")
+
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+
+create_db_dbuser = config['configurations']['ranger-env']['create_db_dbuser']
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_rolling_upgrade = check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks)
+stack_supports_config_versioning = check_stack_feature(StackFeature.CONFIG_VERSIONING, version_for_stack_feature_checks)
+stack_supports_usersync_non_root = check_stack_feature(StackFeature.RANGER_USERSYNC_NON_ROOT, version_for_stack_feature_checks)
+stack_supports_ranger_tagsync = check_stack_feature(StackFeature.RANGER_TAGSYNC_COMPONENT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_log4j = check_stack_feature(StackFeature.RANGER_LOG4J_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_usersync_passwd = check_stack_feature(StackFeature.RANGER_USERSYNC_PASSWORD_JCEKS, version_for_stack_feature_checks)
+stack_supports_infra_client = check_stack_feature(StackFeature.RANGER_INSTALL_INFRA_CLIENT, version_for_stack_feature_checks)
+stack_supports_pid = check_stack_feature(StackFeature.RANGER_PID_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_admin_password_change = check_stack_feature(StackFeature.RANGER_ADMIN_PASSWD_CHANGE, version_for_stack_feature_checks)
+stack_supports_ranger_setup_db_on_start = check_stack_feature(StackFeature.RANGER_SETUP_DB_ON_START, version_for_stack_feature_checks)
+stack_supports_ranger_tagsync_ssl_xml_support = check_stack_feature(StackFeature.RANGER_TAGSYNC_SSL_XML_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_solr_configs = check_stack_feature(StackFeature.RANGER_SOLR_CONFIG_SUPPORT, version_for_stack_feature_checks)
+stack_supports_secure_ssl_password = check_stack_feature(StackFeature.SECURE_RANGER_SSL_PASSWORD, version_for_stack_feature_checks)
+
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+ranger_conf    = '/etc/ranger/admin/conf'
+ranger_ugsync_conf = '/etc/ranger/usersync/conf'
+ranger_tagsync_home  = format('{stack_root}/current/ranger-tagsync')
+ranger_tagsync_conf = format('{stack_root}/current/ranger-tagsync/conf')
+tagsync_bin = '/usr/bin/ranger-tagsync'
+tagsync_services_file = format('{stack_root}/current/ranger-tagsync/ranger-tagsync-services.sh')
+security_store_path = '/etc/security/serverKeys'
+tagsync_etc_path = '/etc/ranger/tagsync/'
+ranger_tagsync_credential_file= os.path.join(tagsync_etc_path,'rangercred.jceks')
+atlas_tagsync_credential_file= os.path.join(tagsync_etc_path,'atlascred.jceks')
+ranger_tagsync_keystore_password = config['configurations']['ranger-tagsync-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
+ranger_tagsync_truststore_password = config['configurations']['ranger-tagsync-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
+atlas_tagsync_keystore_password = config['configurations']['atlas-tagsync-ssl']['xasecure.policymgr.clientssl.keystore.password']
+atlas_tagsync_truststore_password = config['configurations']['atlas-tagsync-ssl']['xasecure.policymgr.clientssl.truststore.password']
+
+if upgrade_direction == Direction.DOWNGRADE and version and not check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
+  stack_supports_rolling_upgrade = True
+  stack_supports_config_versioning = False
+
+if upgrade_direction == Direction.DOWNGRADE and version and not check_stack_feature(StackFeature.RANGER_USERSYNC_NON_ROOT, version):
+  stack_supports_usersync_non_root = False
+
+if stack_supports_rolling_upgrade:
+  ranger_home    = format('{stack_root}/current/ranger-admin')
+  ranger_conf    = '/etc/ranger/admin/conf'
+  ranger_stop    = '/usr/bin/ranger-admin-stop'
+  ranger_start   = '/usr/bin/ranger-admin-start'
+  usersync_home  = format('{stack_root}/current/ranger-usersync')
+  usersync_start = '/usr/bin/ranger-usersync-start'
+  usersync_stop  = '/usr/bin/ranger-usersync-stop'
+  ranger_ugsync_conf = '/etc/ranger/usersync/conf'
+
+if stack_supports_config_versioning:
+  ranger_conf = format('{stack_root}/current/ranger-admin/conf')
+  ranger_ugsync_conf = format('{stack_root}/current/ranger-usersync/conf')
+
+if stack_supports_ranger_tagsync:
+  ranger_tagsync_home  = format('{stack_root}/current/ranger-tagsync')
+  tagsync_bin = '/usr/bin/ranger-tagsync'
+  ranger_tagsync_conf = format('{stack_root}/current/ranger-tagsync/conf')
+  tagsync_services_file = format('{stack_root}/current/ranger-tagsync/ranger-tagsync-services.sh')
+
+usersync_services_file = format('{stack_root}/current/ranger-usersync/ranger-usersync-services.sh')
+
+java_home = config['hostLevelParams']['java_home']
+unix_user  = config['configurations']['ranger-env']['ranger_user']
+unix_group = config['configurations']['ranger-env']['ranger_group']
+ranger_pid_dir = default("/configurations/ranger-env/ranger_pid_dir", "/var/run/ranger")
+usersync_log_dir = default("/configurations/ranger-env/ranger_usersync_log_dir", "/var/log/ranger/usersync")
+admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir", "/var/log/ranger/admin")
+ranger_admin_default_file = format('{ranger_conf}/ranger-admin-default-site.xml')
+security_app_context_file = format('{ranger_conf}/security-applicationContext.xml')
+ranger_ugsync_default_file = format('{ranger_ugsync_conf}/ranger-ugsync-default.xml')
+usgsync_log4j_file = format('{ranger_ugsync_conf}/log4j.xml')
+if stack_supports_ranger_log4j:
+  usgsync_log4j_file = format('{ranger_ugsync_conf}/log4j.properties')
+cred_validator_file = format('{usersync_home}/native/credValidator.uexe')
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+db_flavor =  (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+usersync_exturl =  config['configurations']['admin-properties']['policymgr_external_url']
+if usersync_exturl.endswith('/'):
+  usersync_exturl = usersync_exturl.rstrip('/')
+ranger_host = config['clusterHostInfo']['ranger_admin_hosts'][0]
+ugsync_host = 'localhost'
+usersync_host_info = config['clusterHostInfo']['ranger_usersync_hosts']
+if not is_empty(usersync_host_info) and len(usersync_host_info) > 0:
+  ugsync_host = config['clusterHostInfo']['ranger_usersync_hosts'][0]
+ranger_external_url = config['configurations']['admin-properties']['policymgr_external_url']
+if ranger_external_url.endswith('/'):
+  ranger_external_url = ranger_external_url.rstrip('/')
+ranger_db_name = config['configurations']['admin-properties']['db_name']
+ranger_auditdb_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+
+sql_command_invoker = config['configurations']['admin-properties']['SQL_COMMAND_INVOKER']
+db_root_user = config['configurations']['admin-properties']['db_root_user']
+db_root_password = unicode(config['configurations']['admin-properties']['db_root_password'])
+db_host =  config['configurations']['admin-properties']['db_host']
+ranger_db_user = config['configurations']['admin-properties']['db_user']
+ranger_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+ranger_db_password = unicode(config['configurations']['admin-properties']['db_password'])
+
+#ranger-env properties
+oracle_home = default("/configurations/ranger-env/oracle_home", "-")
+
+#For curl command in ranger to get db connector
+jdk_location = config['hostLevelParams']['jdk_location'] 
+java_share_dir = '/usr/share/java'
+jdbc_jar_name = None
+previous_jdbc_jar_name = None
+if db_flavor.lower() == 'mysql':
+  jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+  audit_jdbc_url = format('jdbc:mysql://{db_host}/{ranger_auditdb_name}') if stack_supports_ranger_audit_db else None
+  jdbc_dialect = "org.eclipse.persistence.platform.database.MySQLPlatform"
+elif db_flavor.lower() == 'oracle':
+  jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+  jdbc_dialect = "org.eclipse.persistence.platform.database.OraclePlatform"
+  colon_count = db_host.count(':')
+  if colon_count == 2 or colon_count == 0:
+    audit_jdbc_url = format('jdbc:oracle:thin:@{db_host}') if stack_supports_ranger_audit_db else None
+  else:
+    audit_jdbc_url = format('jdbc:oracle:thin:@//{db_host}') if stack_supports_ranger_audit_db else None
+elif db_flavor.lower() == 'postgres':
+  jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+  audit_jdbc_url = format('jdbc:postgresql://{db_host}/{ranger_auditdb_name}') if stack_supports_ranger_audit_db else None
+  jdbc_dialect = "org.eclipse.persistence.platform.database.PostgreSQLPlatform"
+elif db_flavor.lower() == 'mssql':
+  jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+  audit_jdbc_url = format('jdbc:sqlserver://{db_host};databaseName={ranger_auditdb_name}') if stack_supports_ranger_audit_db else None
+  jdbc_dialect = "org.eclipse.persistence.platform.database.SQLServerPlatform"
+elif db_flavor.lower() == 'sqla':
+  jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+  audit_jdbc_url = format('jdbc:sqlanywhere:database={ranger_auditdb_name};host={db_host}') if stack_supports_ranger_audit_db else None
+  jdbc_dialect = "org.eclipse.persistence.platform.database.SQLAnywherePlatform"
+
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+previous_jdbc_jar = format("{java_share_dir}/{previous_jdbc_jar_name}")
+if stack_supports_config_versioning:
+  driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")
+  previous_jdbc_jar = format("{ranger_home}/ews/lib/{previous_jdbc_jar_name}")
+
+if db_flavor.lower() == 'sqla':
+  downloaded_custom_connector = format("{tmp_dir}/sqla-client-jdbc.tar.gz")
+  jar_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/sajdbc4.jar")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  jdbc_libs_dir = format("{ranger_home}/native/lib64")
+  ld_lib_path = format("{jdbc_libs_dir}")
+
+#for db connection
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+ranger_jdbc_connection_url = config["configurations"]["ranger-admin-site"]["ranger.jpa.jdbc.url"]
+ranger_jdbc_driver = config["configurations"]["ranger-admin-site"]["ranger.jpa.jdbc.driver"]
+
+ranger_credential_provider_path = config["configurations"]["ranger-admin-site"]["ranger.credential.provider.path"]
+ranger_jpa_jdbc_credential_alias = config["configurations"]["ranger-admin-site"]["ranger.jpa.jdbc.credential.alias"]
+ranger_ambari_db_password = unicode(config["configurations"]["admin-properties"]["db_password"])
+
+ranger_jpa_audit_jdbc_credential_alias = default('/configurations/ranger-admin-site/ranger.jpa.audit.jdbc.credential.alias', 'rangeraudit')
+ranger_ambari_audit_db_password = ''
+if not is_empty(config["configurations"]["admin-properties"]["audit_db_password"]) and stack_supports_ranger_audit_db:
+  ranger_ambari_audit_db_password = unicode(config["configurations"]["admin-properties"]["audit_db_password"])
+
+ugsync_jceks_path = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.credstore.filename"]
+ugsync_cred_lib = os.path.join(usersync_home,"lib","*")
+cred_lib_path = os.path.join(ranger_home,"cred","lib","*")
+cred_setup_prefix = (format('{ranger_home}/ranger_credential_helper.py'), '-l', cred_lib_path)
+ranger_audit_source_type = config["configurations"]["ranger-admin-site"]["ranger.audit.source.type"]
+
+if xml_configurations_supported:
+  ranger_usersync_keystore_password = unicode(config["configurations"]["ranger-ugsync-site"]["ranger.usersync.keystore.password"])
+  ranger_usersync_ldap_ldapbindpassword = unicode(config["configurations"]["ranger-ugsync-site"]["ranger.usersync.ldap.ldapbindpassword"])
+  ranger_usersync_truststore_password = unicode(config["configurations"]["ranger-ugsync-site"]["ranger.usersync.truststore.password"])
+  ranger_usersync_keystore_file = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.keystore.file"]
+  default_dn_name = 'cn=unixauthservice,ou=authenticator,o=mycompany,c=US'
+
+ranger_admin_hosts = config['clusterHostInfo']['ranger_admin_hosts']
+is_ranger_ha_enabled = len(ranger_admin_hosts) > 1
+ranger_ug_ldap_url = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.ldap.url"]
+ranger_ug_ldap_bind_dn = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.ldap.binddn"]
+ranger_ug_ldap_user_searchfilter = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.ldap.user.searchfilter"]
+ranger_ug_ldap_group_searchbase = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.group.searchbase"]
+ranger_ug_ldap_group_searchfilter = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.group.searchfilter"]
+ug_sync_source = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.source.impl.class"]
+current_host = config['hostname']
+if current_host in ranger_admin_hosts:
+  ranger_host = current_host
+
+# ranger-tagsync
+ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
+has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
+
+tagsync_log_dir = default("/configurations/ranger-tagsync-site/ranger.tagsync.logdir", "/var/log/ranger/tagsync")
+tagsync_jceks_path = config["configurations"]["ranger-tagsync-site"]["ranger.tagsync.keystore.filename"]
+atlas_tagsync_jceks_path = config["configurations"]["ranger-tagsync-site"]["ranger.tagsync.source.atlasrest.keystore.filename"]
+tagsync_application_properties = dict(config["configurations"]["tagsync-application-properties"]) if has_ranger_tagsync else None
+tagsync_pid_file = format('{ranger_pid_dir}/tagsync.pid')
+tagsync_cred_lib = os.path.join(ranger_tagsync_home, "lib", "*")
+
+ranger_usersync_log_maxfilesize = default('/configurations/usersync-log4j/ranger_usersync_log_maxfilesize',256) 
+ranger_usersync_log_maxbackupindex = default('/configurations/usersync-log4j/ranger_usersync_log_maxbackupindex',20)
+ranger_tagsync_log_maxfilesize = default('/configurations/tagsync-log4j/ranger_tagsync_log_maxfilesize',256)
+ranger_tagsync_log_number_of_backup_files = default('/configurations/tagsync-log4j/ranger_tagsync_log_number_of_backup_files',20)
+ranger_xa_log_maxfilesize = default('/configurations/admin-log4j/ranger_xa_log_maxfilesize',256)
+ranger_xa_log_maxbackupindex = default('/configurations/admin-log4j/ranger_xa_log_maxbackupindex',20)
+
+# ranger log4j.properties
+admin_log4j = config['configurations']['admin-log4j']['content']
+usersync_log4j = config['configurations']['usersync-log4j']['content']
+tagsync_log4j = config['configurations']['tagsync-log4j']['content']
+
+# ranger kerberos
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+namenode_hosts = default("/clusterHostInfo/namenode_host", [])
+has_namenode = len(namenode_hosts) > 0
+
+ugsync_policymgr_alias = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.policymgr.alias"]
+ugsync_policymgr_keystore = config["configurations"]["ranger-ugsync-site"]["ranger.usersync.policymgr.keystore"]
+
+# ranger solr
+audit_solr_enabled = default('/configurations/ranger-env/xasecure.audit.destination.solr', False)
+ranger_solr_config_set = config['configurations']['ranger-env']['ranger_solr_config_set']
+ranger_solr_collection_name = config['configurations']['ranger-env']['ranger_solr_collection_name']
+ranger_solr_shards = config['configurations']['ranger-env']['ranger_solr_shards']
+replication_factor = config['configurations']['ranger-env']['ranger_solr_replication_factor']
+ranger_solr_conf = format('{ranger_home}/contrib/solr_for_audit_setup/conf')
+infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
+has_infra_solr = len(infra_solr_hosts) > 0
+is_solrCloud_enabled = default('/configurations/ranger-env/is_solrCloud_enabled', False)
+is_external_solrCloud_enabled = default('/configurations/ranger-env/is_external_solrCloud_enabled', False)
+solr_znode = '/ranger_audits'
+if stack_supports_infra_client and is_solrCloud_enabled:
+  solr_znode = default('/configurations/ranger-admin-site/ranger.audit.solr.zookeepers', 'NONE')
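+  # e.g. 'host1:2181,host2:2181/ranger_audits' yields znode '/ranger_audits'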
+  if solr_znode != '' and solr_znode.upper() != 'NONE':
+    solr_znode = solr_znode.split('/')
+    if len(solr_znode) == 2:
+      solr_znode = solr_znode[1]
+      solr_znode = format('/{solr_znode}')
+  if has_infra_solr and not is_external_solrCloud_enabled:
+    solr_znode = config['configurations']['infra-solr-env']['infra_solr_znode']
+solr_user = unix_user
+if has_infra_solr and not is_external_solrCloud_enabled:
+  solr_user = default('/configurations/infra-solr-env/infra_solr_user', unix_user)
+  infra_solr_role_ranger_admin = default('configurations/infra-solr-security-json/infra_solr_role_ranger_admin', 'ranger_user')
+  infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
+  infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
+custom_log4j = has_infra_solr and not is_external_solrCloud_enabled
+
+ranger_audit_max_retention_days = config['configurations']['ranger-solr-configuration']['ranger_audit_max_retention_days']
+ranger_audit_logs_merge_factor = config['configurations']['ranger-solr-configuration']['ranger_audit_logs_merge_factor']
+ranger_solr_config_content = config['configurations']['ranger-solr-configuration']['content']
+
+# get comma separated list of zookeeper host:port pairs
+zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+zookeeper_quorum = ",".join(host + ":" + str(zookeeper_port) for host in zookeeper_hosts)
+
+# solr kerberised
+solr_jaas_file = None
+is_external_solrCloud_kerberos = default('/configurations/ranger-env/is_external_solrCloud_kerberos', False)
+
+if security_enabled:
+  if has_ranger_tagsync:
+    ranger_tagsync_principal = config['configurations']['ranger-tagsync-site']['ranger.tagsync.kerberos.principal']
+    if not is_empty(ranger_tagsync_principal) and ranger_tagsync_principal != '':
+      tagsync_jaas_principal = ranger_tagsync_principal.replace('_HOST', current_host.lower())
+    tagsync_keytab_path = config['configurations']['ranger-tagsync-site']['ranger.tagsync.kerberos.keytab']
+
+  if stack_supports_ranger_kerberos:
+    ranger_admin_keytab = config['configurations']['ranger-admin-site']['ranger.admin.kerberos.keytab']
+    ranger_admin_principal = config['configurations']['ranger-admin-site']['ranger.admin.kerberos.principal']
+    if not is_empty(ranger_admin_principal) and ranger_admin_principal != '':
+      ranger_admin_jaas_principal = ranger_admin_principal.replace('_HOST', ranger_host.lower())
+      if stack_supports_infra_client and is_solrCloud_enabled and is_external_solrCloud_enabled and is_external_solrCloud_kerberos:
+        solr_jaas_file = format('{ranger_home}/conf/ranger_solr_jaas.conf')
+        solr_kerberos_principal = ranger_admin_jaas_principal
+        solr_kerberos_keytab = ranger_admin_keytab
+      if stack_supports_infra_client and is_solrCloud_enabled and not is_external_solrCloud_enabled and not is_external_solrCloud_kerberos:
+        solr_jaas_file = format('{ranger_home}/conf/ranger_solr_jaas.conf')
+        solr_kerberos_principal = ranger_admin_jaas_principal
+        solr_kerberos_keytab = ranger_admin_keytab
+
+# logic to create core-site.xml if hdfs not installed
+if stack_supports_ranger_kerberos and not has_namenode:
+  core_site_property = {
+    'hadoop.security.authentication': 'kerberos' if security_enabled else 'simple'
+  }
+
+  if security_enabled:
+    realm = 'EXAMPLE.COM'
+    ranger_admin_bare_principal = 'rangeradmin'
+    ranger_usersync_bare_principal = 'rangerusersync'
+    ranger_tagsync_bare_principal = 'rangertagsync'
+
+    ranger_usersync_principal = config['configurations']['ranger-ugsync-site']['ranger.usersync.kerberos.principal']
+    if not is_empty(ranger_admin_principal) and ranger_admin_principal != '':
+      ranger_admin_bare_principal = get_bare_principal(ranger_admin_principal)
+    if not is_empty(ranger_usersync_principal) and ranger_usersync_principal != '':
+      ranger_usersync_bare_principal = get_bare_principal(ranger_usersync_principal)
+    realm = config['configurations']['kerberos-env']['realm']
+
+    rule_dict = [
+      {'principal': ranger_admin_bare_principal, 'user': unix_user},
+      {'principal': ranger_usersync_bare_principal, 'user': 'rangerusersync'},
+    ]
+
+    if has_ranger_tagsync:
+      if not is_empty(ranger_tagsync_principal) and ranger_tagsync_principal != '':
+        ranger_tagsync_bare_principal = get_bare_principal(ranger_tagsync_principal)
+      rule_dict.append({'principal': ranger_tagsync_bare_principal, 'user': 'rangertagsync'})
+
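+    # prepend one auth_to_local RULE per principal, then append DEFAULT;
+    # e.g. rangerusersync@EXAMPLE.COM maps to the local 'rangerusersync' user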
+    core_site_auth_to_local_property = ''
+    for item in range(len(rule_dict)):
+      rule_line = 'RULE:[2:$1@$0]({0}@{1})s/.*/{2}/\n'.format(rule_dict[item]['principal'], realm, rule_dict[item]['user'])
+      core_site_auth_to_local_property = rule_line + core_site_auth_to_local_property
+
+    core_site_auth_to_local_property = core_site_auth_to_local_property + 'DEFAULT'
+    core_site_property['hadoop.security.auth_to_local'] = core_site_auth_to_local_property
+
+upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+
+# ranger service pid
+user_group = config['configurations']['cluster-env']['user_group']
+ranger_admin_pid_file = format('{ranger_pid_dir}/rangeradmin.pid')
+ranger_usersync_pid_file = format('{ranger_pid_dir}/usersync.pid')
+
+# admin credential
+admin_username = config['configurations']['ranger-env']['admin_username']
+admin_password = config['configurations']['ranger-env']['admin_password']
+default_admin_password = 'admin'
+
+ranger_is_solr_kerberised = "false"
+if audit_solr_enabled and is_solrCloud_enabled:
+  # Check internal solrCloud
+  if security_enabled and not is_external_solrCloud_enabled:
+    ranger_is_solr_kerberised = "true"
+  # Check external solrCloud
+  if is_external_solrCloud_enabled and is_external_solrCloud_kerberos:
+    ranger_is_solr_kerberised = "true"
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+is_hbase_ha_enabled = len(hbase_master_hosts) > 1
+is_namenode_ha_enabled = len(namenode_hosts) > 1
+ranger_hbase_plugin_enabled = False
+ranger_hdfs_plugin_enabled = False
+
+
+if is_hbase_ha_enabled:
+  if not is_empty(config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']):
+    ranger_hbase_plugin_enabled = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled'].lower() == 'yes'
+if is_namenode_ha_enabled:
+  if not is_empty(config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled']):
+    ranger_hdfs_plugin_enabled = config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes'
+
+ranger_admin_password_properties = ['ranger.jpa.jdbc.password', 'ranger.jpa.audit.jdbc.password', 'ranger.ldap.bind.password', 'ranger.ldap.ad.bind.password']
+ranger_usersync_password_properties = ['ranger.usersync.ldap.ldapbindpassword']
+ranger_tagsync_password_properties = ['xasecure.policymgr.clientssl.keystore.password', 'xasecure.policymgr.clientssl.truststore.password']
+if stack_supports_secure_ssl_password:
+  ranger_admin_password_properties.extend(['ranger.service.https.attrib.keystore.pass', 'ranger.truststore.password'])
+  ranger_usersync_password_properties.extend(['ranger.usersync.keystore.password', 'ranger.usersync.truststore.password'])
+
+ranger_auth_method = config['configurations']['ranger-admin-site']['ranger.authentication.method']
+ranger_ldap_password_alias = default('/configurations/ranger-admin-site/ranger.ldap.binddn.credential.alias', 'ranger.ldap.bind.password')
+ranger_ad_password_alias = default('/configurations/ranger-admin-site/ranger.ldap.ad.binddn.credential.alias', 'ranger.ldap.ad.bind.password')
+ranger_https_keystore_alias = default('/configurations/ranger-admin-site/ranger.service.https.attrib.keystore.credential.alias', 'keyStoreCredentialAlias')
+ranger_truststore_alias = default('/configurations/ranger-admin-site/ranger.truststore.alias', 'trustStoreAlias')
+https_enabled = config['configurations']['ranger-admin-site']['ranger.service.https.attrib.ssl.enabled']
+http_enabled = config['configurations']['ranger-admin-site']['ranger.service.http.enabled']
+https_keystore_password = config['configurations']['ranger-admin-site']['ranger.service.https.attrib.keystore.pass']
+truststore_password = config['configurations']['ranger-admin-site']['ranger.truststore.password']
+
+# need this to capture cluster name for ranger tagsync
+cluster_name = config['clusterName']
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_admin.py
new file mode 100644
index 0000000..b849d58
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_admin.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from ranger_service import ranger_service
+from setup_ranger_xml import setup_ranger_audit_solr, setup_ranger_admin_passwd_change
+from resource_management.libraries.functions import solr_cloud_util
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+import upgrade
+import os, errno
+
+class RangerAdmin(Script):
+
+  def get_component_name(self):
+    return "ranger-admin"
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+    # run configure and DB setup at install time only when the stack does not set up the DB on start (HDP < 2.6)
+    if not params.stack_supports_ranger_setup_db_on_start:
+      self.configure(env, setup_db=True)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if upgrade_type == UPGRADE_TYPE_NON_ROLLING and params.upgrade_direction == Direction.UPGRADE:
+      if params.stack_supports_rolling_upgrade and not params.stack_supports_config_versioning and os.path.isfile(format('{ranger_home}/ews/stop-ranger-admin.sh')):
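+        # reset ownership of the legacy stop script so the ranger unix user can invoke it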
+        File(format('{ranger_home}/ews/stop-ranger-admin.sh'),
+          owner=params.unix_user,
+          group = params.unix_group
+        )
+
+    Execute(format('{params.ranger_stop}'), environment={'JAVA_HOME': params.java_home}, user=params.unix_user)
+    if params.stack_supports_pid:
+      File(params.ranger_admin_pid_file,
+        action = "delete"
+      )
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    upgrade.prestart(env, "ranger-admin")
+
+    self.set_ru_rangeradmin_in_progress(params.upgrade_marker_file)
+
+  def post_upgrade_restart(self,env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if os.path.isfile(params.upgrade_marker_file):
+      os.remove(params.upgrade_marker_file)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # set up the DB on start only when the stack supports it (HDP 2.6+)
+    self.configure(env, upgrade_type=upgrade_type, setup_db=params.stack_supports_ranger_setup_db_on_start)
+
+    if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
+      solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
+      setup_ranger_audit_solr()
+
+    ranger_service('ranger_admin')
+
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    if status_params.stack_supports_pid:
+      check_process_status(status_params.ranger_admin_pid_file)
+      return
+
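+    # no PID-file support on older stacks; fall back to scanning the process table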
+    cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+
+    if code != 0:
+      if self.is_ru_rangeradmin_in_progress(status_params.upgrade_marker_file):
+        Logger.info('Ranger admin process not running - skipping as stack upgrade is in progress')
+      else:
+        Logger.debug('Ranger admin process not running')
+        raise ComponentIsNotRunning()
+    pass
+
+  def configure(self, env, upgrade_type=None, setup_db=False):
+    import params
+    env.set_params(params)
+    if params.xml_configurations_supported:
+      from setup_ranger_xml import ranger
+    else:
+      from setup_ranger import ranger
+
+    # set up db if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_ranger_db
+        setup_ranger_db()
+
+    ranger('ranger_admin', upgrade_type=upgrade_type)
+
+    # set up java patches if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_java_patch
+        setup_java_patch()
+
+      if params.stack_supports_ranger_admin_password_change:
+        setup_ranger_admin_passwd_change()
+
+  def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
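+    # write a marker file so status() can tell a stack upgrade is in progress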
+    config_dir = os.path.dirname(upgrade_marker_file)
+    try:
+      msg = "Starting Upgrade"
+      if (not os.path.exists(config_dir)):
+        os.makedirs(config_dir)
+      ofp = open(upgrade_marker_file, 'w')
+      ofp.write(msg)
+      ofp.close()
+    except OSError as exc:
+      if exc.errno == errno.EEXIST and os.path.isdir(config_dir):
+        pass
+      else:
+        raise
+
+  def is_ru_rangeradmin_in_progress(self, upgrade_marker_file):
+    return os.path.isfile(upgrade_marker_file)
+
+  def setup_ranger_database(self, env):
+    import params
+    env.set_params(params)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None:
+      raise Fail('Unable to determine the stack and stack version')
+
+    stack_version = upgrade_stack[1]
+
+    if params.xml_configurations_supported and params.upgrade_direction == Direction.UPGRADE:
+      Logger.info(format('Setting Ranger database schema, using version {stack_version}'))
+
+      from setup_ranger_xml import setup_ranger_db
+      setup_ranger_db(stack_version=stack_version)
+
+  def setup_ranger_java_patches(self, env):
+    import params
+    env.set_params(params)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None:
+      raise Fail('Unable to determine the stack and stack version')
+
+    stack_version = upgrade_stack[1]
+
+    if params.xml_configurations_supported and params.upgrade_direction == Direction.UPGRADE:
+      Logger.info(format('Applying Ranger java patches, using version {stack_version}'))
+
+      from setup_ranger_xml import setup_java_patch
+      setup_java_patch(stack_version=stack_version)
+
+  def set_pre_start(self, env):
+    import params
+    env.set_params(params)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None:
+      raise Fail('Unable to determine the stack and stack version')
+
+    stack_name = upgrade_stack[0]
+    stack_version = upgrade_stack[1]
+
+    stack_select.select("ranger-admin", stack_version)
+    conf_select.select(stack_name, "ranger-admin", stack_version)
+
+  def get_log_folder(self):
+    import params
+    return params.admin_log_dir
+  
+  def get_user(self):
+    import params
+    return params.unix_user
+
+if __name__ == "__main__":
+  RangerAdmin().execute()
+
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_service.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_service.py
new file mode 100644
index 0000000..0355049
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_service.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.resources.system import Execute
+
+def ranger_service(name, action=None):
+  import params
+
+  env_dict = {'JAVA_HOME': params.java_home}
+  if params.db_flavor.lower() == 'sqla':
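+    # SQL Anywhere loads native client libraries, so expose them via LD_LIBRARY_PATH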
+    env_dict = {'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH': params.ld_lib_path}
+  
+  if name == 'ranger_admin':
+    no_op_test = format('ps -ef | grep proc_rangeradmin | grep -v grep')
+    try:
+      Execute(params.ranger_start, environment=env_dict, user=params.unix_user, not_if=no_op_test)
+    except:
+      show_logs(params.admin_log_dir, params.unix_user)
+      raise
+  elif name == 'ranger_usersync':
+    no_op_test = format('ps -ef | grep proc_rangerusersync | grep -v grep')
+    if params.stack_supports_usersync_non_root:
+      try:
+        Execute(params.usersync_start,
+                environment=env_dict,
+                not_if=no_op_test,
+                user=params.unix_user
+        )
+      except:
+        show_logs(params.usersync_log_dir, params.unix_user)
+        raise
+    else:
+      # Usersync must be run as root on 2.2 stacks
+      Execute((params.usersync_start,),
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test,
+              sudo=True
+      )
+  elif name == 'ranger_tagsync' and params.stack_supports_ranger_tagsync:
+    no_op_test = format('ps -ef | grep proc_rangertagsync | grep -v grep')
+    cmd = format('{tagsync_services_file} start')
+    try:
+      Execute(cmd,
+        environment=env_dict,
+        user=params.unix_user,
+        not_if=no_op_test
+      )
+    except:
+      show_logs(params.tagsync_log_dir, params.unix_user)
+      raise
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_tagsync.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_tagsync.py
new file mode 100644
index 0000000..008fb99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_tagsync.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.core.resources.system import Execute, File
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from ranger_service import ranger_service
+from setup_ranger_xml import ranger, ranger_credential_helper
+from resource_management.core.exceptions import Fail
+import upgrade
+
+class RangerTagsync(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+
+    ranger_credential_helper(params.tagsync_cred_lib, 'tagadmin.user.password', 'rangertagsync', params.tagsync_jceks_path)
+    File(params.tagsync_jceks_path,
+       owner = params.unix_user,
+       group = params.unix_group,
+       mode = 0640
+    )
+    if params.stack_supports_ranger_tagsync_ssl_xml_support:
+      Logger.info("Stack support Atlas user for Tagsync, creating keystore for same.")
+      self.create_atlas_user_keystore(env)
+    else:
+      Logger.info("Stack does not support Atlas user for Tagsync, skipping keystore creation for same.")
+
+    self.configure(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    ranger('ranger_tagsync', upgrade_type=upgrade_type)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env, upgrade_type=upgrade_type)
+    ranger_service('ranger_tagsync')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    Execute(format('{tagsync_services_file} stop'), environment={'JAVA_HOME': params.java_home}, user=params.unix_user)
+    File(params.tagsync_pid_file,
+      action = "delete"
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.tagsync_pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_supports_ranger_tagsync:
+      Logger.info("Executing Ranger Tagsync Stack Upgrade pre-restart")
+      conf_select.select(params.stack_name, "ranger-tagsync", params.version)
+      stack_select.select("ranger-tagsync", params.version)
+
+  def get_component_name(self):
+    return "ranger-tagsync"
+
+  def get_log_folder(self):
+    import params
+    return params.tagsync_log_dir
+  
+  def get_user(self):
+    import params
+    return params.unix_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.tagsync_pid_file]
+
+  def configure_atlas_user_for_tagsync(self, env):
+    Logger.info("Configuring Atlas user for Tagsync service.")
+    import params
+    env.set_params(params)
+
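+    # Resolve the stack name and version being upgraded to; fail fast if they cannot be determined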
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None:
+      raise Fail('Unable to determine the stack and stack version')
+
+    stack_name = upgrade_stack[0]
+    stack_version = upgrade_stack[1]
+
+    stack_select.select("ranger-tagsync", stack_version)
+    conf_select.select(stack_name, "ranger-tagsync", stack_version)
+    if params.stack_supports_ranger_tagsync_ssl_xml_support:
+      Logger.info("Upgrading Tagsync, stack supports an Atlas user for Tagsync, creating the keystore for it.")
+      self.create_atlas_user_keystore(env)
+    else:
+      Logger.info("Upgrading Tagsync, stack does not support an Atlas user for Tagsync, skipping keystore creation.")
+
+    Logger.info("Finished configuring the Atlas user for the Tagsync service.")
+
+  def create_atlas_user_keystore(self,env):
+    import params
+    env.set_params(params)
+    ranger_credential_helper(params.tagsync_cred_lib, 'atlas.user.password', 'admin', params.atlas_tagsync_jceks_path)
+    File(params.atlas_tagsync_jceks_path,
+         owner = params.unix_user,
+         group = params.unix_group,
+         mode = 0640
+    )
+
+if __name__ == "__main__":
+  RangerTagsync().execute()
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_usersync.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_usersync.py
new file mode 100644
index 0000000..b9366f6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/ranger_usersync.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from ranger_service import ranger_service
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.libraries.functions.constants import Direction
+import upgrade
+import os
+
+class RangerUsersync(Script):
+  
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+
+    if params.stack_supports_usersync_passwd:
+      from setup_ranger_xml import ranger_credential_helper
+      ranger_credential_helper(params.ugsync_cred_lib, params.ugsync_policymgr_alias, 'rangerusersync', params.ugsync_policymgr_keystore)
+
+      File(params.ugsync_policymgr_keystore,
+        owner = params.unix_user,
+        group = params.unix_group,
+        mode = 0640
+      )
+
+    self.configure(env)
+    
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.xml_configurations_supported:
+      from setup_ranger_xml import ranger
+    else:
+      from setup_ranger import ranger    
+    
+    ranger('ranger_usersync', upgrade_type=upgrade_type)
+    
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    
+    self.configure(env, upgrade_type=upgrade_type)
+    ranger_service('ranger_usersync')
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
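+    # During an express (non-rolling) upgrade, make the services file executable and re-create the /usr/bin/ranger-usersync symlink if it is missing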
+    if upgrade_type == UPGRADE_TYPE_NON_ROLLING and params.upgrade_direction == Direction.UPGRADE:
+      if params.stack_supports_usersync_non_root and os.path.isfile(params.usersync_services_file):
+        File(params.usersync_services_file,
+          mode = 0755
+        )
+        Execute(('ln','-sf', format('{usersync_services_file}'),'/usr/bin/ranger-usersync'),
+          not_if=format("ls /usr/bin/ranger-usersync"),
+          only_if=format("ls {usersync_services_file}"),
+          sudo=True
+        )
+
+    Execute((params.usersync_stop,), environment={'JAVA_HOME': params.java_home}, sudo=True)
+    if params.stack_supports_pid:
+      File(params.ranger_usersync_pid_file,
+        action = "delete"
+      )
+    
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.stack_supports_pid:
+      check_process_status(status_params.ranger_usersync_pid_file)
+      return
+
+    cmd = 'ps -ef | grep proc_rangerusersync | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+
+    if code != 0:
+      Logger.debug('Ranger usersync process not running')
+      raise ComponentIsNotRunning()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "ranger-usersync")
+
+  def get_component_name(self):
+    return "ranger-usersync"
+
+  def get_log_folder(self):
+    import params
+    return params.usersync_log_dir
+  
+  def get_user(self):
+    import params
+    return params.unix_user
+
+if __name__ == "__main__":
+  RangerUsersync().execute()
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..fb6af95
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,49 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+import os
+
+
+class RangerServiceCheck(Script):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    self.check_ranger_admin_service(params.ranger_external_url, params.upgrade_marker_file)
+
+  def check_ranger_admin_service(self, ranger_external_url, upgrade_marker_file):
+    if self.is_ru_rangeradmin_in_progress(upgrade_marker_file):
+      Logger.info('Skipping the Ranger Admin service check because a stack upgrade is in progress')
+    else:
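+      # Poll the Ranger Admin login page until it returns HTTP 200 (up to 10 tries, 3 seconds apart)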
+      Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {ranger_external_url}/login.jsp | grep 200"),
+        tries = 10,
+        try_sleep=3,
+        logoutput=True)
+
+  def is_ru_rangeradmin_in_progress(self, upgrade_marker_file):
+    return os.path.isfile(upgrade_marker_file)
+
+if __name__ == "__main__":
+  RangerServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger.py
new file mode 100644
index 0000000..b0e8bad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import os
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
+from resource_management.core.source import DownloadSource
+from resource_management.core.logger import Logger
+from resource_management.core.shell import as_sudo
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Directory, Execute, File
+
+
+def ranger(name=None, upgrade_type=None):
+  if name == 'ranger_admin':
+    setup_ranger_admin(upgrade_type=upgrade_type)
+
+  if name == 'ranger_usersync':
+    setup_usersync(upgrade_type=upgrade_type)
+
+def setup_ranger_admin(upgrade_type=None):
+  import params
+
+  check_db_connection()
+
+  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
+    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
+      File(params.previous_jdbc_jar, action='delete')
+
+  File(params.downloaded_custom_connector,
+      content = DownloadSource(params.driver_curl_source),
+      mode = 0644
+  )
+
+  Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
+          path=["/bin", "/usr/bin/"],
+          sudo=True)
+
+  File(params.driver_curl_target, mode=0644)
+
+  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+    properties = params.config['configurations']['admin-properties']
+  )
+
+  custom_config = dict()
+  custom_config['unix_user'] = params.unix_user
+  custom_config['unix_group'] = params.unix_group
+
+  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+    properties=custom_config
+  )
+
+  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+    properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')}
+  )
+
+  # if db_flavor == oracle, set the ORACLE_HOME and LD_LIBRARY_PATH environment variables
+  if params.db_flavor.lower() == 'oracle' and params.oracle_home:
+    env_dict = {'JAVA_HOME': params.java_home, 'ORACLE_HOME':params.oracle_home, 'LD_LIBRARY_PATH':params.oracle_home} 
+  else: 
+    env_dict = {'JAVA_HOME': params.java_home}
+  
+  setup_sh = format("cd {ranger_home} && ") + as_sudo([format('{ranger_home}/setup.sh')])
+  Execute(setup_sh, 
+          environment=env_dict, 
+          logoutput=True,
+  )
+  
+  ModifyPropertiesFile(format("{ranger_conf}/xa_system.properties"),
+       properties = params.config['configurations']['ranger-site'],
+  )
+
+  ModifyPropertiesFile(format("{ranger_conf}/ranger_webserver.properties"),
+    properties = params.config['configurations']['ranger-site'],
+    mode=0744
+  )
+
+  Directory(params.admin_log_dir,
+    owner = params.unix_user,
+    group = params.unix_group
+  )
+
+def setup_usersync(upgrade_type=None):
+  import params
+
+  PropertiesFile(format("{usersync_home}/install.properties"),
+    properties = params.config['configurations']['usersync-properties'],
+  )
+
+  custom_config = dict()
+  custom_config['unix_user'] = params.unix_user
+  custom_config['unix_group'] = params.unix_group
+
+  ModifyPropertiesFile(format("{usersync_home}/install.properties"),
+    properties=custom_config
+  )
+
+  cmd = format("cd {usersync_home} && ") + as_sudo([format('{usersync_home}/setup.sh')])
+  Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+  
+  File([params.usersync_start, params.usersync_stop],
+       owner = params.unix_user
+  )
+  File(params.usersync_services_file,
+    mode = 0755,
+  )
+
+  Directory(params.usersync_log_dir,
+    owner = params.unix_user,
+    group = params.unix_group
+  )
+
+def check_db_connection():
+  import params
+
+  Logger.info('Checking DB connection')
+  env_dict = {}
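+  # Build a flavor-specific connectivity probe; postgres and mssql have no client-side check here, so they use a no-op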
+  if params.db_flavor.lower() == 'mysql':
+    cmd = format('{sql_command_invoker} -u {db_root_user} --password={db_root_password!p} -h {db_host}  -s -e "select version();"')
+  elif params.db_flavor.lower() == 'oracle':
+    cmd = format("{sql_command_invoker} '{db_root_user}/\"{db_root_password}\"@{db_host}' AS SYSDBA")
+    env_dict = {'ORACLE_HOME':params.oracle_home, 'LD_LIBRARY_PATH':params.oracle_home}
+  elif params.db_flavor.lower() == 'postgres':
+    cmd = 'true'
+  elif params.db_flavor.lower() == 'mssql':
+    cmd = 'true'
+  else:
+    cmd = 'true' # no client-side connectivity check is available for other DB flavors; avoid a NameError below
+
+  try:
+    Execute(cmd,
+      environment=env_dict,
+      logoutput=True)
+  except Fail as ex:
+    Logger.error(str(ex))
+    raise Fail('Ranger Database connection check failed')
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger_xml.py
new file mode 100644
index 0000000..26e6578
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/setup_ranger_xml.py
@@ -0,0 +1,853 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import File, Directory, Execute, Link
+from resource_management.core.source import DownloadSource, InlineTemplate, Template
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.core.utils import PasswordString
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions import solr_cloud_util
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.core.exceptions import ExecutionFailed
+
+# This file contains the functions used to set up and configure Ranger Admin, Usersync and Tagsync.
+# The design mimics what the setup.sh script bundled with the Ranger component currently does.
+
+def ranger(name=None, upgrade_type=None):
+  """
+  :param name: name of the Ranger service component to configure (ranger_admin, ranger_usersync or ranger_tagsync)
+  :param upgrade_type: type of stack upgrade in progress, if any
+  """
+  if name == 'ranger_admin':
+    setup_ranger_admin(upgrade_type=upgrade_type)
+
+  if name == 'ranger_usersync':
+    setup_usersync(upgrade_type=upgrade_type)
+
+  if name == 'ranger_tagsync':
+    setup_tagsync(upgrade_type=upgrade_type)
+
+def setup_ranger_admin(upgrade_type=None):
+  import params
+
+  if upgrade_type is None:
+    upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+
+  ranger_home = params.ranger_home
+  ranger_conf = params.ranger_conf
+
+  Directory(ranger_conf,
+    owner = params.unix_user,
+    group = params.unix_group,
+    create_parents = True
+  )
+
+  copy_jdbc_connector()
+
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+    content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+    mode = 0644,
+  )
+
+  cp = format("{check_db_connection_jar}")
+  if params.db_flavor.lower() == 'sqla':
+    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
+  else:
+    cp = cp + os.pathsep + format("{driver_curl_target}")
+  cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")
+
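+  # Verify JDBC connectivity to the Ranger DB with Ambari's DBConnectionVerification helper before running any setup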
+  db_connection_check_command = format(
+    "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}")
+
+  env_dict = {}
+  if params.db_flavor.lower() == 'sqla':
+    env_dict = {'LD_LIBRARY_PATH':params.ld_lib_path}
+
+  Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)
+
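+  # Symlink {ranger_home}/conf to the webapp's classes/conf directory, if the link does not already exist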
+  Execute(('ln','-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'), format('{ranger_home}/conf')),
+    not_if=format("ls {ranger_home}/conf"),
+    only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
+    sudo=True)
+
+  if upgrade_type is not None:
+    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
+    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+
+    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
+    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
+
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+
+  Directory(format('{ranger_home}/'),
+            owner = params.unix_user,
+            group = params.unix_group,
+            recursive_ownership = True,
+  )
+
+  Directory(params.ranger_pid_dir,
+    mode=0755,
+    owner = params.unix_user,
+    group = params.user_group,
+    cd_access = "a",
+    create_parents=True
+  )
+
+  if params.stack_supports_pid:
+    File(format('{ranger_conf}/ranger-admin-env-piddir.sh'),
+      content = format("export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"),
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode=0755
+    )
+
+  Directory(params.admin_log_dir,
+    owner = params.unix_user,
+    group = params.unix_group,
+    create_parents = True,
+    cd_access='a',
+    mode=0755
+  )
+
+  File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
+    content = format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
+    owner = params.unix_user,
+    group = params.unix_group,
+    mode=0755
+  )
+
+  if os.path.isfile(params.ranger_admin_default_file):
+    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)
+  else:
+    Logger.warning('Required file {0} does not exist, copying the default file to {1}'.format(params.ranger_admin_default_file, ranger_conf))
+    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
+    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)
+
+  if os.path.isfile(params.security_app_context_file):
+    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)
+  else:
+    Logger.warning('Required file {0} does not exist, copying the default file to {1}'.format(params.security_app_context_file, ranger_conf))
+    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
+    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)
+
+  if upgrade_type is not None and params.stack_supports_config_versioning:
+    if os.path.islink('/usr/bin/ranger-admin'):
+      Link('/usr/bin/ranger-admin', action="delete")
+
+    Link('/usr/bin/ranger-admin',
+    to=format('{ranger_home}/ews/ranger-admin-services.sh'))
+  
+  if default("/configurations/ranger-admin-site/ranger.authentication.method", "") == 'PAM':
+    d = '/etc/pam.d'
+    if os.path.isdir(d):
+      if os.path.isfile(os.path.join(d, 'ranger-admin')):
+        Logger.info('ranger-admin PAM file already exists.')
+      else:
+        File(format('{d}/ranger-admin'),
+            content=Template('ranger_admin_pam.j2'),
+            owner = params.unix_user,
+            group = params.unix_group,
+            mode=0644
+            )
+      if os.path.isfile(os.path.join(d, 'ranger-remote')):
+        Logger.info('ranger-remote PAM file already exists.')
+      else:
+        File(format('{d}/ranger-remote'),
+            content=Template('ranger_remote_pam.j2'),
+            owner = params.unix_user,
+            group = params.unix_group,
+            mode=0644
+            )
+    else:
+      Logger.error("Unable to use PAM authentication, /etc/pam.d/ directory does not exist.")
+
+  Execute(('ln','-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),'/usr/bin/ranger-admin'),
+    not_if=format("ls /usr/bin/ranger-admin"),
+    only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
+    sudo=True)
+
+  # remove plain-text password from xml configs
+
+  ranger_admin_site_copy = {}
+  ranger_admin_site_copy.update(params.config['configurations']['ranger-admin-site'])
+  for prop in params.ranger_admin_password_properties:
+    if prop in ranger_admin_site_copy:
+      ranger_admin_site_copy[prop] = "_"
+
+  XmlConfig("ranger-admin-site.xml",
+    conf_dir=ranger_conf,
+    configurations=ranger_admin_site_copy,
+    configuration_attributes=params.config['configuration_attributes']['ranger-admin-site'],
+    owner=params.unix_user,
+    group=params.unix_group,
+    mode=0644)
+
+  Directory(os.path.join(ranger_conf,'ranger_jaas'),
+    mode=0700,
+    owner=params.unix_user,
+    group=params.unix_group,
+  )
+
+  if params.stack_supports_ranger_log4j:
+    File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
+      owner=params.unix_user,
+      group=params.unix_group,
+      content=InlineTemplate(params.admin_log4j),
+      mode=0644
+    )
+
+  do_keystore_setup(upgrade_type=upgrade_type)
+
+  create_core_site_xml(ranger_conf)
+
+  if params.stack_supports_ranger_kerberos and params.security_enabled:
+    if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
+      XmlConfig("hbase-site.xml",
+        conf_dir=ranger_conf,
+        configurations=params.config['configurations']['hbase-site'],
+        configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+        owner=params.unix_user,
+        group=params.unix_group,
+        mode=0644
+      )
+
+    if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
+      XmlConfig("hdfs-site.xml",
+        conf_dir=ranger_conf,
+        configurations=params.config['configurations']['hdfs-site'],
+        configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+        owner=params.unix_user,
+        group=params.unix_group,
+        mode=0644
+      )
+
+def setup_ranger_db(stack_version=None):
+  import params
+  
+  ranger_home = params.ranger_home
+  version = params.version
+  if stack_version is not None:
+    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")
+    version = stack_version
+
+  copy_jdbc_connector(stack_version=version)
+
+  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+    properties = {'audit_store': params.ranger_audit_source_type},
+    owner = params.unix_user,
+  )
+
+  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
+  if params.db_flavor.lower() == 'sqla':
+    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}
+
+  # If requested, set up the Ranger DB and DB user before running the schema setup
+  if params.create_db_dbuser:
+    Logger.info('Setting up Ranger DB and DB User')
+    dba_setup = format('ambari-python-wrap {ranger_home}/dba_script.py -q')
+    Execute(dba_setup, 
+            environment=env_dict,
+            logoutput=True,
+            user=params.unix_user,
+    )
+  else:
+    Logger.info('Separate DBA property not set. Assuming the Ranger DB and DB user already exist.')
+
+  db_setup = format('ambari-python-wrap {ranger_home}/db_setup.py')
+  Execute(db_setup, 
+          environment=env_dict,
+          logoutput=True,
+          user=params.unix_user,
+  )
+
+
+def setup_java_patch(stack_version=None):
+  import params
+
+  ranger_home = params.ranger_home
+  if stack_version is not None:
+    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")
+
+  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
+  if params.db_flavor.lower() == 'sqla':
+    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}
+
+  setup_java_patch = format('ambari-python-wrap {ranger_home}/db_setup.py -javapatch')
+  Execute(setup_java_patch, 
+          environment=env_dict,
+          logoutput=True,
+          user=params.unix_user,
+  )
+
+
+def do_keystore_setup(upgrade_type=None):
+  import params
+
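+  # Populate the Ranger JCEKS credential provider with the DB, audit, LDAP/AD and SSL secrets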
+  ranger_home = params.ranger_home
+  cred_lib_path = params.cred_lib_path
+
+  if not is_empty(params.ranger_credential_provider_path):
+    ranger_credential_helper(cred_lib_path, params.ranger_jpa_jdbc_credential_alias, params.ranger_ambari_db_password, params.ranger_credential_provider_path)
+
+    File(params.ranger_credential_provider_path,
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode = 0640
+    )
+
+  if not is_empty(params.ranger_credential_provider_path) and (params.ranger_audit_source_type).lower() == 'db' and not is_empty(params.ranger_ambari_audit_db_password):
+    ranger_credential_helper(cred_lib_path, params.ranger_jpa_audit_jdbc_credential_alias, params.ranger_ambari_audit_db_password, params.ranger_credential_provider_path)
+
+    File(params.ranger_credential_provider_path,
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode = 0640
+    )
+
+  if params.ranger_auth_method.upper() == "LDAP":
+    ranger_credential_helper(params.cred_lib_path, params.ranger_ldap_password_alias, params.ranger_usersync_ldap_ldapbindpassword, params.ranger_credential_provider_path)
+
+    File(params.ranger_credential_provider_path,
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode = 0640
+    )
+
+  if params.ranger_auth_method.upper() == "ACTIVE_DIRECTORY":
+    ranger_credential_helper(params.cred_lib_path, params.ranger_ad_password_alias, params.ranger_usersync_ldap_ldapbindpassword, params.ranger_credential_provider_path)
+
+    File(params.ranger_credential_provider_path,
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode = 0640
+    )
+
+  if params.stack_supports_secure_ssl_password:
+    ranger_credential_helper(params.cred_lib_path, params.ranger_truststore_alias, params.truststore_password, params.ranger_credential_provider_path)
+
+    if params.https_enabled and not params.http_enabled:
+      ranger_credential_helper(params.cred_lib_path, params.ranger_https_keystore_alias, params.https_keystore_password, params.ranger_credential_provider_path)
+
+    File(params.ranger_credential_provider_path,
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode = 0640
+    )
+
+def password_validation(password):
+  import params
+  if password.strip() == "":
+    raise Fail("A blank password is not allowed for the bind user. Please enter a valid password.")
+  if re.search("[\\\`'\"]", password):
+    raise Fail("LDAP/AD bind password contains an unsupported special character (one of \" ' \ `)")
+  else:
+    Logger.info("Password validated.")
+
+def copy_jdbc_connector(stack_version=None):
+  import params
+
+  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
+    error_message = format("{db_flavor} jdbc driver cannot be downloaded from {jdk_location}\nPlease run 'ambari-server setup --jdbc-db={db_flavor} --jdbc-driver={{path_to_jdbc}}' on ambari-server host.")
+    raise Fail(error_message)
+
+  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
+    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
+      File(params.previous_jdbc_jar, action='delete')
+
+  File(params.downloaded_custom_connector,
+    content = DownloadSource(params.driver_curl_source),
+    mode = 0644
+  )
+
+  ranger_home = params.ranger_home
+  if stack_version is not None:
+    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")
+
+  driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")
+
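+  # The SQL Anywhere (sqla) driver ships as an archive: extract it, then copy the jar and its native libraries into place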
+  if params.db_flavor.lower() == 'sqla':
+    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)
+
+    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(ranger_home, 'ews', 'lib')),
+      path=["/bin", "/usr/bin/"],
+      sudo=True)
+
+    File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)
+
+    Directory(params.jdbc_libs_dir,
+      cd_access="a",
+      create_parents=True)
+
+    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
+            path=["/bin", "/usr/bin/"])
+  else:
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(ranger_home, 'ews', 'lib')),
+      path=["/bin", "/usr/bin/"],
+      sudo=True)
+
+    File(os.path.join(ranger_home, 'ews', 'lib',params.jdbc_jar_name), mode=0644)
+
+  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+    properties = params.config['configurations']['admin-properties'],
+    owner = params.unix_user,
+  )
+
+  if params.db_flavor.lower() == 'sqla':
+    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+      properties = {'SQL_CONNECTOR_JAR': format('{ranger_home}/ews/lib/sajdbc4.jar')},
+      owner = params.unix_user,
+    )
+  else:
+    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
+      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
+      owner = params.unix_user,
+    )
+ 
+def setup_usersync(upgrade_type=None):
+  import params
+
+  usersync_home = params.usersync_home
+  ranger_home = params.ranger_home
+  ranger_ugsync_conf = params.ranger_ugsync_conf
+
+  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
+    password_validation(params.ranger_usersync_ldap_ldapbindpassword)
+
+  Directory(params.ranger_pid_dir,
+    mode=0755,
+    owner = params.unix_user,
+    group = params.user_group,
+    cd_access = "a",
+    create_parents=True
+  )
+
+  if params.stack_supports_pid:
+    File(format('{ranger_ugsync_conf}/ranger-usersync-env-piddir.sh'),
+      content = format("export USERSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_USERSYNC_USER={unix_user}"),
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode=0755
+    )
+
+  Directory(params.usersync_log_dir,
+    owner = params.unix_user,
+    group = params.unix_group,
+    cd_access = 'a',
+    create_parents=True,
+    mode=0755,
+    recursive_ownership = True
+  )
+
+  File(format('{ranger_ugsync_conf}/ranger-usersync-env-logdir.sh'),
+    content = format("export logdir={usersync_log_dir}"),
+    owner = params.unix_user,
+    group = params.unix_group,
+    mode=0755
+  )
+  
+  Directory(format("{ranger_ugsync_conf}/"),
+    owner = params.unix_user
+  )
+
+  if upgrade_type is not None:
+    src_file = format('{usersync_home}/conf.dist/ranger-ugsync-default.xml')
+    dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+
+  if params.stack_supports_ranger_log4j:
+    File(format('{usersync_home}/conf/log4j.properties'),
+      owner=params.unix_user,
+      group=params.unix_group,
+      content=InlineTemplate(params.usersync_log4j),
+      mode=0644
+    )
+  elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
+    src_file = format('{usersync_home}/conf.dist/log4j.xml')
+    dst_file = format('{usersync_home}/conf/log4j.xml')
+    Execute(('cp', '-f', src_file, dst_file), sudo=True)
+
+  # remove plain-text password from xml configs
+  ranger_ugsync_site_copy = {}
+  ranger_ugsync_site_copy.update(params.config['configurations']['ranger-ugsync-site'])
+  for prop in params.ranger_usersync_password_properties:
+    if prop in ranger_ugsync_site_copy:
+      ranger_ugsync_site_copy[prop] = "_"
+
+  XmlConfig("ranger-ugsync-site.xml",
+    conf_dir=ranger_ugsync_conf,
+    configurations=ranger_ugsync_site_copy,
+    configuration_attributes=params.config['configuration_attributes']['ranger-ugsync-site'],
+    owner=params.unix_user,
+    group=params.unix_group,
+    mode=0644)
+
+  if os.path.isfile(params.ranger_ugsync_default_file):
+    File(params.ranger_ugsync_default_file, owner=params.unix_user, group=params.unix_group)
+
+  if os.path.isfile(params.usgsync_log4j_file):
+    File(params.usgsync_log4j_file, owner=params.unix_user, group=params.unix_group)
+
+  if os.path.isfile(params.cred_validator_file):
+    File(params.cred_validator_file, group=params.unix_group, mode=04555)
+
+  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.key.password', params.ranger_usersync_keystore_password, params.ugsync_jceks_path)
+
+  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
+    ranger_credential_helper(params.ugsync_cred_lib, 'ranger.usersync.ldap.bindalias', params.ranger_usersync_ldap_ldapbindpassword, params.ugsync_jceks_path)
+
+  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.truststore.password', params.ranger_usersync_truststore_password, params.ugsync_jceks_path)
+
+  File(params.ugsync_jceks_path,
+       owner = params.unix_user,
+       group = params.unix_group,
+       mode = 0640
+  )
+  
+  File([params.usersync_start, params.usersync_stop],
+       owner = params.unix_user,
+       group = params.unix_group
+  )
+
+  File(params.usersync_services_file,
+    mode = 0755,
+  )
+
+  Execute(('ln','-sf', format('{usersync_services_file}'),'/usr/bin/ranger-usersync'),
+    not_if=format("ls /usr/bin/ranger-usersync"),
+    only_if=format("ls {usersync_services_file}"),
+    sudo=True)
+
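+  # On first setup, generate a self-signed keystore for usersync SSL if none exists yet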
+  if not os.path.isfile(params.ranger_usersync_keystore_file):
+    cmd = format("{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'")
+
+    Execute(cmd, logoutput=True, user = params.unix_user)
+
+    File(params.ranger_usersync_keystore_file,
+        owner = params.unix_user,
+        group = params.unix_group,
+        mode = 0640
+    )
+
+  create_core_site_xml(ranger_ugsync_conf)
+
+def setup_tagsync(upgrade_type=None):
+  import params
+
+  ranger_tagsync_home = params.ranger_tagsync_home
+  ranger_home = params.ranger_home
+  ranger_tagsync_conf = params.ranger_tagsync_conf
+
+  Directory(format("{ranger_tagsync_conf}"),
+    owner = params.unix_user,
+    group = params.unix_group,
+    create_parents = True
+  )
+
+  Directory(params.ranger_pid_dir,
+    mode=0755,
+    create_parents=True,
+    owner = params.unix_user,
+    group = params.user_group,
+    cd_access = "a",
+  )
+
+  if params.stack_supports_pid:
+    File(format('{ranger_tagsync_conf}/ranger-tagsync-env-piddir.sh'),
+      content = format("export TAGSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_TAGSYNC_USER={unix_user}"),
+      owner = params.unix_user,
+      group = params.unix_group,
+      mode=0755
+    )
+
+  Directory(params.tagsync_log_dir,
+    create_parents = True,
+    owner = params.unix_user,
+    group = params.unix_group,
+    cd_access = "a",
+    mode=0755
+  )
+
+  File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'),
+    content = format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"),
+    owner = params.unix_user,
+    group = params.unix_group,
+    mode=0755
+  )
+
+  XmlConfig("ranger-tagsync-site.xml",
+    conf_dir=ranger_tagsync_conf,
+    configurations=params.config['configurations']['ranger-tagsync-site'],
+    configuration_attributes=params.config['configuration_attributes']['ranger-tagsync-site'],
+    owner=params.unix_user,
+    group=params.unix_group,
+    mode=0644)
+  if params.stack_supports_ranger_tagsync_ssl_xml_support:
+    Logger.info("Stack supports tagsync-ssl configurations, applying them.")
+    setup_tagsync_ssl_configs()
+  else:
+    Logger.info("Stack does not support tagsync-ssl configurations, skipping them.")
+
+  PropertiesFile(format('{ranger_tagsync_conf}/atlas-application.properties'),
+    properties = params.tagsync_application_properties,
+    mode=0755,
+    owner=params.unix_user,
+    group=params.unix_group
+  )
+
+  File(format('{ranger_tagsync_conf}/log4j.properties'),
+    owner=params.unix_user,
+    group=params.unix_group,
+    content=InlineTemplate(params.tagsync_log4j),
+    mode=0644
+  )
+
+  File(params.tagsync_services_file,
+    mode = 0755,
+  )
+
+  Execute(('ln','-sf', format('{tagsync_services_file}'),'/usr/bin/ranger-tagsync'),
+    not_if=format("ls /usr/bin/ranger-tagsync"),
+    only_if=format("ls {tagsync_services_file}"),
+    sudo=True)
+
+  create_core_site_xml(ranger_tagsync_conf)
+
+def ranger_credential_helper(lib_path, alias_key, alias_value, file_path):
+  import params
+
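+  # Invoke Ranger's buildks utility to store the alias/value pair in the given JCEKS file; PasswordString keeps the value out of the logs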
+  java_bin = format('{java_home}/bin/java')
+  file_path = format('jceks://file{file_path}')
+  cmd = (java_bin, '-cp', lib_path, 'org.apache.ranger.credentialapi.buildks', 'create', alias_key, '-value', PasswordString(alias_value), '-provider', file_path)
+  Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
+
+def create_core_site_xml(conf_dir):
+  import params
+
+  if params.stack_supports_ranger_kerberos:
+    if params.has_namenode:
+      XmlConfig("core-site.xml",
+                conf_dir=conf_dir,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.unix_user,
+                group=params.unix_group,
+                mode=0644
+      )
+    else:
+      Logger.warning('HDFS service not installed. Creating core-site.xml with default Ranger properties.')
+      XmlConfig("core-site.xml",
+        conf_dir=conf_dir,
+        configurations=params.core_site_property,
+        configuration_attributes={},
+        owner=params.unix_user,
+        group=params.unix_group,
+        mode=0644
+      )
+
+def setup_ranger_audit_solr():
+  import params
+
+  if params.security_enabled and params.stack_supports_ranger_kerberos:
+
+    if params.solr_jaas_file is not None:
+      File(format("{solr_jaas_file}"),
+        content=Template("ranger_solr_jaas_conf.j2"),
+        owner=params.unix_user
+      )
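+  # Ensure the Solr znode exists, upload the Ranger config set to ZooKeeper, then create the audit collection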
+  try:
+    check_znode()
+
+    if params.stack_supports_ranger_solr_configs:
+      Logger.info('Solr configurations supported, creating solr-configurations.')
+      File(format("{ranger_solr_conf}/solrconfig.xml"),
+           content=InlineTemplate(params.ranger_solr_config_content),
+           owner=params.unix_user,
+           group=params.unix_group,
+           mode=0644
+      )
+
+      solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.ranger_solr_config_set,
+        config_set_dir = params.ranger_solr_conf,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java_home,
+        solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
+        jaas_file=params.solr_jaas_file,
+        retry=30, interval=5
+      )
+
+    else:
+      Logger.info('Solr configurations not supported, skipping solr-configurations.')
+      solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.ranger_solr_config_set,
+        config_set_dir = params.ranger_solr_conf,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java_home,
+        jaas_file=params.solr_jaas_file,
+        retry=30, interval=5)
+
+    if params.security_enabled and params.has_infra_solr \
+      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
+
+      solr_cloud_util.add_solr_roles(params.config,
+                                     roles = [params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                     new_service_principals = [params.ranger_admin_jaas_principal])
+      service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'),
+                                        ('knox', 'knox'), ('nifi', 'nifi'), ('storm', 'storm'), ('yarn', 'yarn')]
+      service_principals = get_ranger_plugin_principals(service_default_principals_map)
+      solr_cloud_util.add_solr_roles(params.config,
+                                     roles = [params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
+                                     new_service_principals = service_principals)
+
+
+    solr_cloud_util.create_collection(
+      zookeeper_quorum = params.zookeeper_quorum,
+      solr_znode = params.solr_znode,
+      collection = params.ranger_solr_collection_name,
+      config_set = params.ranger_solr_config_set,
+      java64_home = params.java_home,
+      shards = params.ranger_solr_shards,
+      replication_factor = int(params.replication_factor),
+      jaas_file = params.solr_jaas_file)
+
+    if params.security_enabled and params.has_infra_solr \
+      and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
+      secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file)
+      secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file)
+  except ExecutionFailed as execution_exception:
+    Logger.error('Error while configuring Solr for Ranger. Please check that the Solr/ZooKeeper services are up and running:\n {0}'.format(execution_exception))
+
+def setup_ranger_admin_passwd_change():
+  import params
+
+  if params.admin_password != params.default_admin_password:
+    cmd = format('ambari-python-wrap {ranger_home}/db_setup.py -changepassword {admin_username} {default_admin_password!p} {admin_password!p}')
+    Logger.info('Updating admin password')
+    Execute(cmd, environment={'JAVA_HOME': params.java_home, 'RANGER_ADMIN_HOME': params.ranger_home}, user=params.unix_user)
+
+@retry(times=10, sleep_time=5, err_class=Fail)
+def check_znode():
+  import params
+  solr_cloud_util.check_znode(
+    zookeeper_quorum=params.zookeeper_quorum,
+    solr_znode=params.solr_znode,
+    java64_home=params.java_home)
+
+def secure_znode(znode, jaasFile):
+  import params
+  solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
+                               solr_znode=znode,
+                               jaas_file=jaasFile,
+                               java64_home=params.java_home, sasl_users=[params.ranger_admin_jaas_principal])
+
+def get_ranger_plugin_principals(services_defaults_tuple_list):
+  """
+  Get ranger plugin user principals from service-default value maps using ranger-*-audit configurations
+  """
+  import params
+  user_principals = []
+  if len(services_defaults_tuple_list) < 1:
+    raise Exception("Services - defaults map parameter is missing.")
+
+  for (service, default_value) in services_defaults_tuple_list:
+    user_principal = default(format("configurations/ranger-{service}-audit/xasecure.audit.jaas.Client.option.principal"), default_value)
+    user_principals.append(user_principal)
+  return user_principals
+
+
+def setup_tagsync_ssl_configs():
+  import params
+  Directory(params.security_store_path,
+            cd_access="a",
+            create_parents=True)
+
+  Directory(params.tagsync_etc_path,
+            cd_access="a",
+            owner=params.unix_user,
+            group=params.unix_group,
+            mode=0775,
+            create_parents=True)
+
+  # remove plain-text password from xml configs
+  ranger_tagsync_policymgr_ssl_copy = {}
+  ranger_tagsync_policymgr_ssl_copy.update(params.config['configurations']['ranger-tagsync-policymgr-ssl'])
+  for prop in params.ranger_tagsync_password_properties:
+    if prop in ranger_tagsync_policymgr_ssl_copy:
+      ranger_tagsync_policymgr_ssl_copy[prop] = "_"
+
+  XmlConfig("ranger-policymgr-ssl.xml",
+            conf_dir=params.ranger_tagsync_conf,
+            configurations=ranger_tagsync_policymgr_ssl_copy,
+            configuration_attributes=params.config['configuration_attributes']['ranger-tagsync-policymgr-ssl'],
+            owner=params.unix_user,
+            group=params.unix_group,
+            mode=0644)
+
+  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.ranger_tagsync_keystore_password, params.ranger_tagsync_credential_file)
+  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.ranger_tagsync_truststore_password, params.ranger_tagsync_credential_file)
+
+  File(params.ranger_tagsync_credential_file,
+       owner = params.unix_user,
+       group = params.unix_group,
+       mode = 0640
+       )
+
+  # remove plain-text password from xml configs
+  atlas_tagsync_ssl_copy = {}
+  atlas_tagsync_ssl_copy.update(params.config['configurations']['atlas-tagsync-ssl'])
+  for prop in params.ranger_tagsync_password_properties:
+    if prop in atlas_tagsync_ssl_copy:
+      atlas_tagsync_ssl_copy[prop] = "_"
+
+  XmlConfig("atlas-tagsync-ssl.xml",
+            conf_dir=params.ranger_tagsync_conf,
+            configurations=atlas_tagsync_ssl_copy,
+            configuration_attributes=params.config['configuration_attributes']['atlas-tagsync-ssl'],
+            owner=params.unix_user,
+            group=params.unix_group,
+            mode=0644)
+
+  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.atlas_tagsync_keystore_password, params.atlas_tagsync_credential_file)
+  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.atlas_tagsync_truststore_password, params.atlas_tagsync_credential_file)
+
+  File(params.atlas_tagsync_credential_file,
+       owner = params.unix_user,
+       group = params.unix_group,
+       mode = 0640
+       )
+  Logger.info("Tagsync-ssl configurations applied successfully.")
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..842430b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")
+ranger_pid_dir = config['configurations']['ranger-env']['ranger_pid_dir']
+tagsync_pid_file = format('{ranger_pid_dir}/tagsync.pid')
+stack_name = default("/hostLevelParams/stack_name", None)
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+ranger_admin_pid_file = format('{ranger_pid_dir}/rangeradmin.pid')
+ranger_usersync_pid_file = format('{ranger_pid_dir}/usersync.pid')
+stack_supports_pid = stack_version_formatted and check_stack_feature(StackFeature.RANGER_PID_SUPPORT, stack_version_formatted)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..a07a1fd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+
+def prestart(env, stack_component):
+  import params
+
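+  # Repoint the conf and binary symlinks for this component at the target stack version before it restarts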
+  if params.version and params.stack_supports_rolling_upgrade:
+    conf_select.select(params.stack_name, stack_component, params.version)
+    stack_select.select(stack_component, params.version)
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/input.config-ranger.json.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/input.config-ranger.json.j2
new file mode 100644
index 0000000..6c5bb1f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/input.config-ranger.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"ranger_admin",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/xa_portal.log"
+    },
+    {
+      "type":"ranger_dbpatch",
+      "is_enabled":"true",
+      "path":"{{default('/configurations/ranger-env/ranger_admin_log_dir', '/var/log/ranger/admin')}}/ranger_db_patch.log"
+    },
+    {
+      "type":"ranger_usersync",
+      "rowtype":"service",
+      "path":"{{default('/configurations/ranger-env/ranger_usersync_log_dir', '/var/log/ranger/usersync')}}/usersync.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_admin",
+            "ranger_dbpatch"
+          ]
+        }
+      },
+      "log4j_format":"%d [%t] %-5p %C{6} (%F:%L) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_usersync"
+          ]
+        }
+      },
+      "log4j_format":"%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n",
+      "multiline_pattern":"^(%{USER_SYNC_DATE:logtime})",
+      "message_pattern":"(?m)^%{USER_SYNC_DATE:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"dd MMM yyyy HH:mm:ss"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_admin_pam.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_admin_pam.j2
new file mode 100644
index 0000000..d69ad6c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_admin_pam.j2
@@ -0,0 +1,22 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+#%PAM-1.0
+auth    sufficient        pam_unix.so
+auth    sufficient        pam_sss.so
+account sufficient        pam_unix.so
+account sufficient        pam_sss.so
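
Assuming the rendered template above is installed under /etc/pam.d/ with a Ranger-specific service name (the name below is hypothetical), the stack can be exercised from Python with the third-party python-pam package; a minimal sketch:

```python
# Minimal sketch: exercising a PAM service like the one above from Python.
# Assumes the rendered template is installed as /etc/pam.d/ranger-admin
# (service name hypothetical) and that the third-party `python-pam`
# package is installed (pip install python-pam).
import pam

p = pam.pam()
ok = p.authenticate("someuser", "somepassword", service="ranger-admin")
print("auth ok" if ok else f"auth failed: {p.code} {p.reason}")
```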
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_remote_pam.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_remote_pam.j2
new file mode 100644
index 0000000..d69ad6c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_remote_pam.j2
@@ -0,0 +1,22 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+#%PAM-1.0
+auth    sufficient        pam_unix.so
+auth    sufficient        pam_sss.so
+account sufficient        pam_unix.so
+account sufficient        pam_sss.so
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_solr_jaas_conf.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_solr_jaas_conf.j2
new file mode 100644
index 0000000..a456688
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/package/templates/ranger_solr_jaas_conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  storeKey=true
+  useTicketCache=false
+  keyTab="{{solr_kerberos_keytab}}"
+  principal="{{solr_kerberos_principal}}";
+};
\ No newline at end of file
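
A minimal sketch of how a Jinja2 template like the JAAS config above gets its solr_kerberos_keytab and solr_kerberos_principal values filled in; the keytab path and principal below are invented for illustration.

```python
# Sketch: rendering the JAAS template above with Jinja2 (values invented).
from jinja2 import Template

jaas_tpl = Template("""
Client {
  com.sun.security.auth.module.Krb5LoginModule required
  useKeyTab=true
  storeKey=true
  useTicketCache=false
  keyTab="{{solr_kerberos_keytab}}"
  principal="{{solr_kerberos_principal}}";
};
""")

print(jaas_tpl.render(
    solr_kerberos_keytab="/etc/security/keytabs/solr.service.keytab",
    solr_kerberos_principal="solr/host.example.com@EXAMPLE.COM",
))
```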
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/properties/ranger-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/properties/ranger-solrconfig.xml.j2
new file mode 100644
index 0000000..25dbb7a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/properties/ranger-solrconfig.xml.j2
@@ -0,0 +1,1874 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>5.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
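
The ${solr.data.dir:} form above is Solr's property-substitution syntax: use the named system property if set, otherwise fall back to the default after the colon (empty here). A toy Python illustration of the rule, not Solr's actual implementation:

```python
import re

# Toy illustration of Solr-style ${property:default} substitution
# (not Solr's actual implementation).
def substitute(text: str, props: dict) -> str:
    def repl(m):
        name, default = m.group(1), m.group(2) or ""
        return props.get(name, default)
    return re.sub(r"\$\{([^:}]+):?([^}]*)\}", repl, text)

print(substitute("${solr.data.dir:}", {}))                      # -> "" (empty default)
print(substitute("${solr.lock.type:native}", {}))               # -> "native"
print(substitute("${solr.data.dir:}", {"solr.data.dir": "/x"})) # -> "/x"
```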
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based, not
+       persistent, and doesn't work with replication.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}">
+
+
+    <!-- These will be used if you are using the solr.HdfsDirectoryFactory,
+         otherwise they will be ignored. If you don't plan on using hdfs,
+         you can safely remove this section. -->
+    <!-- The root directory that collection data should be written to. -->
+    <str name="solr.hdfs.home">${solr.hdfs.home:}</str>
+    <!-- The hadoop configuration files to use for the hdfs client. -->
+    <str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
+    <!-- Enable/Disable the hdfs cache. -->
+    <str name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</str>
+    <!-- Enable/Disable using one global cache for all SolrCores.
+         The settings used will be from the first HdfsDirectoryFactory created. -->
+    <str name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</str>
+
+  </directoryFactory>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- To enable dynamic schema REST APIs, use the following for <schemaFactory>: -->
+
+       <schemaFactory class="ManagedIndexSchemaFactory">
+         <bool name="mutable">true</bool>
+         <str name="managedSchemaResourceName">managed-schema</str>
+       </schemaFactory>
+<!--
+       When ManagedIndexSchemaFactory is specified, Solr will load the schema from
+       the resource named in 'managedSchemaResourceName', rather than from schema.xml.
+       Note that the managed schema resource CANNOT be named schema.xml.  If the managed
+       schema does not exist, Solr will create it after reading schema.xml, then rename
+       'schema.xml' to 'schema.xml.bak'.
+
+       Do NOT hand edit the managed schema - external modifications will be ignored and
+       overwritten as a result of schema modification REST API calls.
+
+       When ManagedIndexSchemaFactory is specified with mutable = true, schema
+       modification REST API calls will be allowed; otherwise, error responses will be
+       sent back for these requests.
+
+  <schemaFactory class="ClassicIndexSchemaFactory"/>
+  -->
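
Because the schemaFactory above is ManagedIndexSchemaFactory with mutable=true, fields can be added over the Schema REST API rather than by hand-editing the managed schema. A sketch using the requests library; the base URL, collection name, and field definition are assumptions for illustration:

```python
# Sketch: adding a field via the Schema REST API enabled by the mutable
# ManagedIndexSchemaFactory above (base URL, collection and field assumed).
import requests

resp = requests.post(
    "http://localhost:8886/solr/ranger_audits/schema",
    json={"add-field": {"name": "cluster", "type": "string", "stored": True}},
)
resp.raise_for_status()
print(resp.json())
```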
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to make it easier to see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- The maximum number of simultaneous threads that may be
+         indexing documents at once in IndexWriter; if more than this
+         many threads arrive they will wait for others to finish.
+         Default in Solr/Lucene is 8. -->
+    <!-- <maxIndexingThreads>8</maxIndexingThreads>  -->
+
+    <!-- Expert: Enabling compound file will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default from Lucene 2.3 onward was LogByteSizeMergePolicy;
+         even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicy class="org.apache.lucene.index.TieredMergePolicy">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+        </mergePolicy>
+      -->
+
+    <!-- Merge Factor
+         The merge factor controls how many segments will get merged at a time.
+         For TieredMergePolicy, mergeFactor is a convenience parameter which
+         will set both MaxMergeAtOnce and SegmentsPerTier at once.
+         For LogByteSizeMergePolicy, mergeFactor decides how many new segments
+         will be allowed before they are merged into one.
+         Default is 10 for both merge policies.
+      -->
+    <!--
+    <mergeFactor>10</mergeFactor>
+      -->
+
+    <!-- Ranger customization. Set to 5 to trigger purging of deleted documents more often -->
+    <mergeFactor>{{ranger_audit_logs_merge_factor}}</mergeFactor>
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Unlock On Startup
+
+         If true, unlock any held write or commit locks on startup.
+         This defeats the locking mechanism that allows multiple
+         processes to safely access a lucene index, and should be used
+         with care. Default is "false".
+
+         This is not needed if lock type is 'single'
+     -->
+    <!--
+    <unlockOnStartup>false</unlockOnStartup>
+      -->
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found; use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability,
+         and SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.  -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:5000}</maxTime>
+     </autoSoftCommit>
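
With the timers above, a hard commit fires at most 15 s after a change (without opening a new searcher) and a soft commit makes changes searchable within roughly 5 s. Clients can also bound visibility per request with commitWithin, as in this sketch; the base URL, collection, and document are assumptions:

```python
# Sketch: indexing with a per-request visibility bound instead of waiting
# for the autoCommit/autoSoftCommit timers above (URL and doc assumed).
import requests

resp = requests.post(
    "http://localhost:8886/solr/ranger_audits/update",
    params={"commitWithin": "5000"},   # make the docs visible within ~5s
    json=[{"id": "doc-1", "text": "example audit event"}],
)
resp.raise_for_status()
print(resp.json())
```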
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+    <!-- The RunExecutableListener executes an external command from a
+         hook such as postCommit or postOptimize.
+
+         exe - the name of the executable to run
+         dir - dir to use as the current working directory. (default=".")
+         wait - the calling thread waits until the executable returns.
+                (default="true")
+         args - the arguments to pass to the program.  (default is none)
+         env - environment variables to set.  (default is none)
+      -->
+    <!-- This example shows how RunExecutableListener could be used
+         with the script based replication...
+         http://wiki.apache.org/solr/CollectionDistribution
+      -->
+    <!--
+       <listener event="postCommit" class="solr.RunExecutableListener">
+         <str name="exe">solr/bin/snapshooter</str>
+         <str name="dir">.</str>
+         <bool name="wait">true</bool>
+         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
+         <arr name="env"> <str>MYVAR=val1</str> </arr>
+       </listener>
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+    <!-- Max Boolean Clauses
+
+         Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.
+
+         ** WARNING **
+
+         This option actually modifies a global Lucene property that
+         will affect all SolrCores.  If multiple solrconfig.xml files
+         disagree on this property, the value at any given moment will
+         be based on the last SolrCore to be initialized.
+
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+         Caches results of searches - ordered lists of document ids
+         (DocList) based on a query, a sort, and the range of documents requested.
+      -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(),cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score".
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
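
The rounding implied by the example above can be written out directly; this tiny illustration is just the arithmetic, not Solr code:

```python
# Illustration of the queryResultWindowSize rounding described above.
def cached_window(start: int, rows: int, window: int = 20) -> range:
    # Solr collects document ids up to the next multiple of the window size.
    upper = ((start + rows + window - 1) // window) * window
    return range(0, upper)

print(cached_window(10, 10, window=50))  # docs 10-19 requested -> range(0, 50) cached
```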
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>true</useColdSearcher>
+
+    <!-- Max Warming Searchers
+
+         Maximum number of searchers that may be warming in the
+         background concurrently.  An error is returned if this limit
+         is exceeded.
+
+         Recommended values are 1-2 for read-only slaves, higher for
+         masters w/o cache warming.
+      -->
+    <maxWarmingSearchers>2</maxWarmingSearchers>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+       handleSelect is a legacy option that affects the behavior of requests
+       such as /select?qt=XXX
+
+       handleSelect="true" will cause the SolrDispatchFilter to process
+       the request and dispatch the query to a handler specified by the
+       "qt" param, assuming "/select" isn't already registered.
+
+       handleSelect="false" will cause the SolrDispatchFilter to
+       ignore "/select" requests, resulting in a 404 unless a handler
+       is explicitly registered with the name "/select"
+
+       handleSelect="true" is not recommended for new users, but is the default
+       for backwards compatibility
+    -->
+  <requestDispatcher handleSelect="false" >
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         The settings below authorize Solr to fetch remote files. You
+         should make sure your system has some authentication before
+         using enableRemoteStreaming="true".
+
+      -->
+    <requestParsers enableRemoteStreaming="true"
+                    multipartUploadLimitInKB="2048000"
+                    formdataUploadLimitInKB="2048"
+                    addHttpRequestToContext="false"/>
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       Legacy behavior: If the request path uses "/select" but no Request
+       Handler has that name, and if handleSelect="true" has been specified in
+       the requestDispatcher, then the Request Handler is dispatched based on
+       the qt parameter.  Handlers without a leading '/' are accessed
+       like so: http://host/app/[core/]select?qt=name  If no qt is
+       given, then the requestHandler that declares default="true" will be
+       used or the one named "standard".
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+
+  <requestHandler name="/dataimport" class="solr.DataImportHandler">
+    <lst name="defaults">
+      <str name="config">solr-data-config.xml</str>
+    </lst>
+  </requestHandler>
+
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler" It delegates to a sequent
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <str name="df">text</str>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+
+  <!-- realtime get handler, guaranteed to return the latest stored fields of
+       any document, without the need to commit or open a new searcher.  The
+       current implementation relies on the updateLog feature being enabled.
+
+       ** WARNING **
+       Do NOT disable the realtime get handler at /get if you are using
+       SolrCloud, otherwise any leader election will cause a full sync in ALL
+       replicas for the shard in question. Similarly, a replica recovery will
+       also always fetch the complete index from the leader because a partial
+       sync will not be possible in the absence of this handler.
+  -->
+  <requestHandler name="/get" class="solr.RealTimeGetHandler">
+     <lst name="defaults">
+       <str name="omitHeader">true</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+     </lst>
+  </requestHandler>
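
A usage sketch for the realtime get handler above: fetching the latest stored copy of a document with no commit required. The base URL, collection, and document id are assumptions:

```python
# Sketch: fetching the latest stored copy of a document through the
# realtime get handler above, with no commit required (URL and id assumed).
import requests

resp = requests.get(
    "http://localhost:8886/solr/ranger_audits/get",
    params={"id": "doc-1"},
)
resp.raise_for_status()
print(resp.json())  # {"doc": {...}} or {"doc": null} if the id is unknown
```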
+
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that multiple instances of the same Request Handler
+       (SearchHandler) can be registered multiple times with different
+       names (and different init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+
+      <!-- VelocityResponseWriter settings -->
+      <str name="wt">velocity</str>
+      <str name="v.template">browse</str>
+      <str name="v.layout">layout</str>
+
+      <!-- Query settings -->
+      <str name="defType">edismax</str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">10</str>
+      <str name="fl">*,score</str>
+
+      <!-- Faceting defaults -->
+      <str name="facet">on</str>
+      <str name="facet.mincount">1</str>
+    </lst>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <str name="update.chain">add-unknown-fields-to-the-schema</str>
+    </lst>
+  </initParams>
+
+  <!-- Update Request Handler.
+
+       http://wiki.apache.org/solr/UpdateXmlMessages
+
+       The canonical Request Handler for Modifying the Index through
+       commands specified using XML, JSON, CSV, or JAVABIN
+
+       Note: Since Solr 1.1, request handlers require a valid content
+       type header if posted in the body. For example, curl now
+       requires: -H 'Content-type:text/xml; charset=utf-8'
+
+       To override the request content type and force a specific
+       Content-type, use the request parameter:
+         ?update.contentType=text/csv
+
+       This handler will pick a response format to match the input
+       if the 'wt' parameter is not explicit
+    -->
+  <requestHandler name="/update" class="solr.UpdateRequestHandler">
+    <!-- See below for information on defining
+         updateRequestProcessorChains that can be used by name
+         on each Update Request
+      -->
+    <!--
+       <lst name="defaults">
+         <str name="update.chain">dedupe</str>
+       </lst>
+       -->
+  </requestHandler>
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
+
+
+  <!-- Field Analysis Request Handler
+
+       RequestHandler that provides much the same functionality as
+       analysis.jsp. Provides the ability to specify multiple field
+       types and field names in the same request and outputs
+       index-time and query-time analysis for each of them.
+
+       Request parameters are:
+       analysis.fieldname - field name whose analyzers are to be used
+
+       analysis.fieldtype - field type whose analyzers are to be used
+       analysis.fieldvalue - text for index-time analysis
+       q (or analysis.q) - text for query time analysis
+       analysis.showmatch (true|false) - When set to true and when
+           query analysis is performed, the produced tokens of the
+           field value analysis will be marked as "matched" for every
+           token that is produced by the query analysis
+   -->
+  <requestHandler name="/analysis/field"
+                  startup="lazy"
+                  class="solr.FieldAnalysisRequestHandler" />
+
+
+  <!-- Document Analysis Handler
+
+       http://wiki.apache.org/solr/AnalysisRequestHandler
+
+       An analysis handler that provides a breakdown of the analysis
+       process of provided documents. This handler expects a (single)
+       content stream with the following format:
+
+       <docs>
+         <doc>
+           <field name="id">1</field>
+           <field name="name">The Name</field>
+           <field name="text">The Text Value</field>
+         </doc>
+         <doc>...</doc>
+         <doc>...</doc>
+         ...
+       </docs>
+
+    Note: Each document must contain a field which serves as the
+    unique key. This key is used in the returned response to associate
+    an analysis breakdown to the analyzed document.
+
+    Like the FieldAnalysisRequestHandler, this handler also supports
+    query analysis by sending either an "analysis.query" or "q"
+    request parameter that holds the query text to be analyzed. It
+    also supports the "analysis.showmatch" parameter; when it is set to
+    true, all field tokens that match the query tokens will be marked
+    as a "match".
+  -->
+  <requestHandler name="/analysis/document"
+                  class="solr.DocumentAnalysisRequestHandler"
+                  startup="lazy" />
+
+  <!-- This single handler is equivalent to the following... -->
+  <!--
+     <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />
+     <requestHandler name="/admin/system"     class="solr.admin.SystemInfoHandler" />
+     <requestHandler name="/admin/plugins"    class="solr.admin.PluginInfoHandler" />
+     <requestHandler name="/admin/threads"    class="solr.admin.ThreadDumpHandler" />
+     <requestHandler name="/admin/properties" class="solr.admin.PropertiesRequestHandler" />
+     <requestHandler name="/admin/file"       class="solr.admin.ShowFileRequestHandler" >
+    -->
+  <!-- If you wish to hide files under ${solr.home}/conf, explicitly
+       register the ShowFileRequestHandler using the definition below.
+       NOTE: The glob pattern ('*') is the only pattern supported at present, *.xml will
+             not exclude all files ending in '.xml'. Use it to exclude _all_ updates
+    -->
+  <!--
+     <requestHandler name="/admin/file"
+                     class="solr.admin.ShowFileRequestHandler" >
+       <lst name="invariants">
+         <str name="hidden">synonyms.txt</str>
+         <str name="hidden">anotherfile.txt</str>
+         <str name="hidden">*</str>
+       </lst>
+     </requestHandler>
+    -->
+
+  <!--
+    Enabling this request handler (which is NOT a default part of the admin handler) will allow the Solr UI to edit
+    all the config files. This is intended for secure/development use ONLY! Leaving it available and publicly
+    accessible is a security vulnerability and should be done with extreme caution!
+  -->
+  <!--
+  <requestHandler name="/admin/fileedit" class="solr.admin.EditFileRequestHandler" >
+    <lst name="invariants">
+         <str name="hidden">synonyms.txt</str>
+         <str name="hidden">anotherfile.txt</str>
+    </lst>
+  </requestHandler>
+  -->
+  <!-- ping/healthcheck -->
+  <requestHandler name="/admin/ping" class="solr.PingRequestHandler">
+    <lst name="invariants">
+      <str name="q">solrpingquery</str>
+    </lst>
+    <lst name="defaults">
+      <str name="echoParams">all</str>
+    </lst>
+    <!-- An optional feature of the PingRequestHandler is to configure the
+         handler with a "healthcheckFile" which can be used to enable/disable
+         the PingRequestHandler.
+         relative paths are resolved against the data dir
+      -->
+    <!-- <str name="healthcheckFile">server-enabled.txt</str> -->
+  </requestHandler>
+
+  <!-- Echo the request contents back to the client -->
+  <requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
+    <lst name="defaults">
+     <str name="echoParams">explicit</str>
+     <str name="echoHandler">true</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Solr Replication
+
+       The SolrReplicationHandler supports replicating indexes from a
+       "master" used for indexing and "slaves" used for queries.
+
+       http://wiki.apache.org/solr/SolrReplication
+
+       It is also necessary for SolrCloud to function (in Cloud mode, the
+       replication handler is used to bulk transfer segments when nodes
+       are added or need to recover).
+
+       https://wiki.apache.org/solr/SolrCloud/
+    -->
+  <requestHandler name="/replication" class="solr.ReplicationHandler" >
+    <!--
+       To enable simple master/slave replication, uncomment one of the
+       sections below, depending on whether this solr instance should be
+       the "master" or a "slave".  If this instance is a "slave" you will
+       also need to fill in the masterUrl to point to a real machine.
+    -->
+    <!--
+       <lst name="master">
+         <str name="replicateAfter">commit</str>
+         <str name="replicateAfter">startup</str>
+         <str name="confFiles">schema.xml,stopwords.txt</str>
+       </lst>
+    -->
+    <!--
+       <lst name="slave">
+         <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+         <str name="pollInterval">00:00:60</str>
+       </lst>
+    -->
+  </requestHandler>
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum threshold of documents a query term can appear to be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+      	<float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that use an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
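+
+  <!-- Example only, not part of the shipped config: a minimal sketch of
+       hooking the spellcheck component into a normal query handler, as the
+       NOTE above recommends. The handler name "/select" and the parameter
+       values are illustrative assumptions, not this package's settings.
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.collate">true</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
+    -->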
+
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>      <!-- org.apache.solr.spelling.suggest.fst -->
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>     <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
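+
+  <!-- Example request, illustrative only (the query term is an assumption):
+       /suggest?suggest=true&suggest.dictionary=mySuggester&suggest.q=elec
+       returns up to suggest.count suggestions built from the "cat" field,
+       weighted by the "price" field configured above.
+    -->
+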
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+
+            java -Dsolr.clustering.enabled=true -jar start.jar
+
+       http://wiki.apache.org/solr/ClusteringComponent
+       http://carrot2.github.io/solr-integration-strategies/
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <lst name="engine">
+      <str name="name">lingo</str>
+
+      <!-- Class name of a clustering algorithm compatible with the Carrot2 framework.
+
+           Currently available open source algorithms are:
+           * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+           * org.carrot2.clustering.stc.STCClusteringAlgorithm
+           * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+
+           See http://project.carrot2.org/algorithms.html for more information.
+
+           A commercial algorithm Lingo3G (needs to be installed separately) is defined as:
+           * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+        -->
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+
+      <!-- Override location of the clustering algorithm's resources
+           (attribute definitions and lexical resources).
+
+           A directory from which to load algorithm-specific stop words,
+           stop labels and attribute definition XMLs.
+
+           For an overview of Carrot2 lexical resources, see:
+           http://download.carrot2.org/head/manual/#chapter.lexical-resources
+
+           For an overview of Lingo3G lexical resources, see:
+           http://download.carrotsearch.com/lingo3g/manual/#chapter.lexical-resources
+       -->
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <!-- An example definition for the STC clustering algorithm. -->
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+    </lst>
+
+    <!-- An example definition for the bisecting kmeans clustering algorithm. -->
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply the highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">10</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
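+
+  <!-- Example request, illustrative only; it assumes Solr was started with
+       -Dsolr.clustering.enabled=true so the component above is active:
+       /clustering?q=*:*&rows=100
+       returns the matching documents grouped into labeled clusters.
+    -->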
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
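+
+  <!-- Example request, illustrative only (the field name is an assumption):
+       /terms?terms.fl=name&terms.limit=10
+       lists up to 10 indexed terms from the "name" field together with
+       their document frequencies.
+    -->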
+
+
+  <!-- Query Elevation Component
+
+       http://wiki.apache.org/solr/QueryElevationComponent
+
+       a search component that enables you to configure the top
+       results for a given query regardless of the normal Lucene
+       scoring.
+    -->
+  <searchComponent name="elevator" class="solr.QueryElevationComponent" >
+    <!-- pick a fieldType to analyze queries -->
+    <str name="queryFieldType">string</str>
+    <str name="config-file">elevate.xml</str>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="df">text</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
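+
+  <!-- For reference, a sketch of the elevate.xml referenced above; the
+       query texts and document ids are placeholders, not real data:
+  <elevate>
+    <query text="foo bar">
+      <doc id="1" />
+      <doc id="2" />
+    </query>
+    <query text="ipod">
+      <doc id="MA147LL/A" />
+      <doc id="IW-02" exclude="true" />
+    </query>
+  </elevate>
+    -->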
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawngreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD (default), LINE, or SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object, -->
+          <!-- and the Locale object is used to get an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
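+
+  <!-- Example request, illustrative only (the query and field are assumptions):
+       /select?q=name:monitor&hl=true&hl.fl=name&hl.fragmenter=regex
+       wraps matching terms in <em> tags via the default "html" formatter
+       configured above.
+    -->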
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+
+  <!-- Add unknown fields to the schema
+
+       An example field type guessing update processor that will
+       attempt to parse string-typed field values as Booleans, Longs,
+       Doubles, or Dates, and then add schema fields with the guessed
+       field types.
+
+       This requires that the schema is both managed and mutable, by
+       declaring schemaFactory as ManagedIndexSchemaFactory, with
+       mutable specified as true.
+
+       See http://wiki.apache.org/solr/GuessingFieldTypes
+    -->
+  <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
+    <processor class="solr.DefaultValueUpdateProcessorFactory">
+        <str name="fieldName">_ttl_</str>
+        <str name="value">+{{ranger_audit_max_retention_days}}DAYS</str>
+    </processor>
+    <processor class="solr.processor.DocExpirationUpdateProcessorFactory">
+        <int name="autoDeletePeriodSeconds">86400</int>
+        <str name="ttlFieldName">_ttl_</str>
+        <str name="expirationFieldName">_expire_at_</str>
+    </processor>
+    <processor class="solr.FirstFieldValueUpdateProcessorFactory">
+      <str name="fieldName">_expire_at_</str>
+    </processor>
+
+    <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <arr name="format">
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss</str>
+        <str>yyyy-MM-dd'T'HH:mmZ</str>
+        <str>yyyy-MM-dd'T'HH:mm</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd HH:mm:ssZ</str>
+        <str>yyyy-MM-dd HH:mm:ss</str>
+        <str>yyyy-MM-dd HH:mmZ</str>
+        <str>yyyy-MM-dd HH:mm</str>
+        <str>yyyy-MM-dd</str>
+      </arr>
+    </processor>
+    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
+      <str name="defaultFieldType">key_lower_case</str>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Boolean</str>
+        <str name="fieldType">boolean</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.util.Date</str>
+        <str name="fieldType">tdate</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Long</str>
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tlong</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Number</str>
+        <str name="fieldType">tdouble</str>
+      </lst>
+    </processor>
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
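+
+  <!-- The chain above only runs when an update request references it by name.
+       A minimal sketch of the wiring (this handler declaration is
+       illustrative, not part of this file):
+  <requestHandler name="/update" class="solr.UpdateRequestHandler">
+    <lst name="defaults">
+      <str name="update.chain">add-unknown-fields-to-the-schema</str>
+    </lst>
+  </requestHandler>
+    -->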
+
+
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The writer registered as "default" is used when 'wt' is not
+       specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json", just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+    <str name="template.base.dir">${velocity.template.base.dir:}</str>
+  </queryResponseWriter>
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked
+       every xsltCacheLifetimeSeconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
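+
+  <!-- Example request, illustrative only; it assumes a stylesheet named
+       example.xsl exists under conf/xslt:
+       /select?q=*:*&wt=xslt&tr=example.xsl
+    -->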
+
+  <!-- Query Parsers
+
+       http://wiki.apache.org/solr/SolrQuerySyntax
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <str name="connection">jdbc://....</str>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+
+  <!-- Legacy config for the admin interface -->
+  <admin>
+    <defaultQuery>*:*</defaultQuery>
+  </admin>
+
+</config>
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..d75d5f1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,41 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"ranger.service.https.attrib.ssl.enabled",
+          "desired":"true",
+          "site":"ranger-admin-site"
+        },
+        {
+          "property":"ranger.service.http.enabled",
+          "desired":"false",
+          "site":"ranger-admin-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "ranger_admin_ui",
+        "label": "Ranger Admin UI",
+        "component_name" : "RANGER_ADMIN",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "attributes": ["authenticated", "sso"],
+        "port":{
+          "http_property": "ranger.service.http.port",
+          "http_default_port": "6080",
+          "https_property": "ranger.service.https.port",
+          "https_default_port": "6182",
+          "regex": "(\\d*)+",
+          "site": "ranger-admin-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/role_command_order.json
new file mode 100644
index 0000000..557e9ac
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/role_command_order.json
@@ -0,0 +1,9 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for RANGER",
+    "RANGER_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_ADMIN-START", "RANGER_USERSYNC-START"],
+    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START"],
+    "RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/service_advisor.py
new file mode 100644
index 0000000..875fa30
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/service_advisor.py
@@ -0,0 +1,793 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+DB_TYPE_DEFAULT_PORT_MAP = {"MYSQL":"3306", "ORACLE":"1521", "POSTGRES":"5432", "MSSQL":"1433", "SQLA":"2638"}
+
+class RangerServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to the
+    host index where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = RangerRecommender()
+    recommender.recommendRangerConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+    recommender.recommendRangerConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+    recommender.recommendRangerConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendRangerConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+    recommender.recommendRangerConfigurationsFromHDP26(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = RangerValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class RangerRecommender(service_advisor.ServiceAdvisor):
+  """
+  Ranger Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+  def recommendRangerConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+
+    putRangerAdminProperty = self.putProperty(configurations, "admin-properties", services)
+
+    # Build policymgr_external_url
+    protocol = 'http'
+    ranger_admin_host = 'localhost'
+    port = '6080'
+
+    # Check if http is disabled. For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled
+    # For Ranger-0.4.0 this can be checked in ranger-site/http.enabled
+    if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \
+          and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \
+            ('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \
+               and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):
+      # HTTPS protocol is used
+      protocol = 'https'
+      # Starting with Ranger-0.5.0.2.3 the port is stored in ranger-admin-site ranger.service.https.port
+      if 'ranger-admin-site' in services['configurations'] and \
+                      'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']
+      # In Ranger-0.4.0 the port is stored in ranger-site https.service.port
+      elif 'ranger-site' in services['configurations'] and \
+                      'https.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['https.service.port']
+    else:
+      # HTTP protocol is used
+      # Starting with Ranger-0.5.0.2.3 the port is stored in ranger-admin-site ranger.service.http.port
+      if 'ranger-admin-site' in services['configurations'] and \
+                      'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']
+      # In Ranger-0.4.0 the port is stored in ranger-site http.service.port
+      elif 'ranger-site' in services['configurations'] and \
+                      'http.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['http.service.port']
+
+    ranger_admin_hosts = self.getComponentHostNames(services, "RANGER", "RANGER_ADMIN")
+    if ranger_admin_hosts:
+      if len(ranger_admin_hosts) > 1 \
+        and services['configurations'] \
+        and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():
+
+        # in case of HA deployment keep the policymgr_external_url specified in the config
+        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
+      else:
+
+        ranger_admin_host = ranger_admin_hosts[0]
+        policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
+
+      putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
+
+    rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+    if rangerServiceVersion == '0.4.0':
+      # Recommend ldap settings based on ambari.properties configuration
+      # If 'ambari.ldap.isConfigured' == true
+      # For Ranger version 0.4.0
+      if 'ambari-server-properties' in services and \
+                      'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+                      services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+        putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
+        serverProperties = services['ambari-server-properties']
+        if 'authentication.ldap.managerDn' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
+        if 'authentication.ldap.primaryUrl' in serverProperties:
+          ldap_protocol =  'ldap://'
+          if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
+            ldap_protocol =  'ldaps://'
+          ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
+          putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)
+        if 'authentication.ldap.userObjectClass' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
+        if 'authentication.ldap.usernameAttribute' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
+
+
+      # Set Ranger Admin Authentication method
+      if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \
+                      'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:
+        rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']
+        authenticationMethod = rangerUserSyncSource.upper()
+        if authenticationMethod != 'FILE':
+          putRangerAdminProperty('authentication_method', authenticationMethod)
+
+      # Recommend xasecure.audit.destination.hdfs.dir
+      # For Ranger version 0.4.0
+      servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+      putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+      include_hdfs = "HDFS" in servicesList
+      if include_hdfs:
+        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+          default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+          default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'
+          putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)
+
+      # Recommend Ranger Audit properties for ranger supported services
+      # For Ranger version 0.4.0
+      ranger_services = [
+        {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},
+        {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},
+        {'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},
+        {'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},
+        {'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}
+      ]
+
+      for item in range(len(ranger_services)):
+        if ranger_services[item]['service_name'] in servicesList:
+          component_audit_file =  ranger_services[item]['audit_file']
+          if component_audit_file in services["configurations"]:
+            ranger_audit_dict = [
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}
+            ]
+            putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+            # use a distinct variable name so the outer loop index "item" is not shadowed
+            for audit_item in ranger_audit_dict:
+              if audit_item['filename'] in services["configurations"] and audit_item['configname'] in services["configurations"][audit_item['filename']]["properties"]:
+                if audit_item['filename'] in configurations and audit_item['configname'] in configurations[audit_item['filename']]["properties"]:
+                  rangerAuditProperty = configurations[audit_item['filename']]["properties"][audit_item['configname']]
+                else:
+                  rangerAuditProperty = services["configurations"][audit_item['filename']]["properties"][audit_item['configname']]
+                putRangerAuditProperty(audit_item['target_configname'], rangerAuditProperty)
+
+
+
+
+  def recommendRangerConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    putRangerEnvProperty = self.putProperty(configurations, "ranger-env")
+    cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+    security_enabled = cluster_env is not None and "security_enabled" in cluster_env and \
+                       cluster_env["security_enabled"].lower() == "true"
+    if "ranger-env" in configurations and not security_enabled:
+      putRangerEnvProperty("ranger-storm-plugin-enabled", "No")
+
+  def getDBConnectionHostPort(self, db_type, db_host):
+    connection_string = ""
+    if db_type is None or db_type == "":
+      return connection_string
+    else:
+      colon_count = db_host.count(':')
+      if colon_count == 0:
+        if db_type in DB_TYPE_DEFAULT_PORT_MAP:
+          connection_string = db_host + ":" + DB_TYPE_DEFAULT_PORT_MAP[db_type]
+        else:
+          connection_string = db_host
+      elif colon_count == 1:
+        connection_string = db_host
+      elif colon_count == 2:
+        connection_string = db_host
+
+    return connection_string
+
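+  # Illustrative behavior of the helper above (comments only, not executed):
+  #   getDBConnectionHostPort('MYSQL', 'dbhost')      -> 'dbhost:3306'  (default port appended)
+  #   getDBConnectionHostPort('MYSQL', 'dbhost:3307') -> 'dbhost:3307'  (explicit port kept)
+  #   getDBConnectionHostPort(None, 'dbhost')         -> ''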
+
+  def getOracleDBConnectionHostPort(self, db_type, db_host, rangerDbName):
+    connection_string = self.getDBConnectionHostPort(db_type, db_host)
+    colon_count = db_host.count(':')
+    if colon_count == 1 and '/' in db_host:
+      connection_string = "//" + connection_string
+    elif colon_count == 0 or colon_count == 1:
+      connection_string = "//" + connection_string + "/" + rangerDbName if rangerDbName else "//" + connection_string
+
+    return connection_string
+
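+  # Illustrative behavior of the helper above (comments only, not executed):
+  #   getOracleDBConnectionHostPort('ORACLE', 'dbhost', 'ranger')     -> '//dbhost:1521/ranger'
+  #   getOracleDBConnectionHostPort('ORACLE', 'dbhost:1521/XE', None) -> '//dbhost:1521/XE'
+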
+  def recommendRangerUrlConfigurations(self, configurations, services, requiredServices):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    policymgr_external_url = ""
+    if 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties']:
+      if 'admin-properties' in configurations and 'policymgr_external_url' in configurations['admin-properties']['properties']:
+        policymgr_external_url = configurations['admin-properties']['properties']['policymgr_external_url']
+      else:
+        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
+
+    for index in range(len(requiredServices)):
+      if requiredServices[index]['service_name'] in servicesList:
+        component_config_type = requiredServices[index]['config_type']
+        component_name = requiredServices[index]['service_name']
+        component_config_property = 'ranger.plugin.{0}.policy.rest.url'.format(component_name.lower())
+        if requiredServices[index]['service_name'] == 'RANGER_KMS':
+          component_config_property = 'ranger.plugin.kms.policy.rest.url'
+        putRangerSecurityProperty = self.putProperty(configurations, component_config_type, services)
+        if component_config_type in services["configurations"] and component_config_property in services["configurations"][component_config_type]["properties"]:
+          putRangerSecurityProperty(component_config_property, policymgr_external_url)
+
+  def recommendRangerConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
+    putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+    putRangerUgsyncSite = self.putProperty(configurations, "ranger-ugsync-site", services)
+
+    if 'admin-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['admin-properties']['properties'])\
+      and ('db_host' in services['configurations']['admin-properties']['properties']) and ('db_name' in services['configurations']['admin-properties']['properties']):
+
+      rangerDbFlavor = services['configurations']["admin-properties"]["properties"]["DB_FLAVOR"]
+      rangerDbHost =   services['configurations']["admin-properties"]["properties"]["db_host"]
+      rangerDbName =   services['configurations']["admin-properties"]["properties"]["db_name"]
+      ranger_db_url_dict = {
+        'MYSQL': {'ranger.jpa.jdbc.driver': 'com.mysql.jdbc.Driver',
+                  'ranger.jpa.jdbc.url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/' + rangerDbName},
+        'ORACLE': {'ranger.jpa.jdbc.driver': 'oracle.jdbc.driver.OracleDriver',
+                   'ranger.jpa.jdbc.url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerDbFlavor, rangerDbHost, rangerDbName)},
+        'POSTGRES': {'ranger.jpa.jdbc.driver': 'org.postgresql.Driver',
+                     'ranger.jpa.jdbc.url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/' + rangerDbName},
+        'MSSQL': {'ranger.jpa.jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
+                  'ranger.jpa.jdbc.url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';databaseName=' + rangerDbName},
+        'SQLA': {'ranger.jpa.jdbc.driver': 'sap.jdbc4.sqlanywhere.IDriver',
+                 'ranger.jpa.jdbc.url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';database=' + rangerDbName}
+      }
+      rangerDbProperties = ranger_db_url_dict.get(rangerDbFlavor, ranger_db_url_dict['MYSQL'])
+      for key in rangerDbProperties:
+        putRangerAdminProperty(key, rangerDbProperties.get(key))
+
+      if 'admin-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['admin-properties']['properties']) \
+        and ('db_host' in services['configurations']['admin-properties']['properties']):
+
+        rangerDbFlavor = services['configurations']["admin-properties"]["properties"]["DB_FLAVOR"]
+        rangerDbHost =   services['configurations']["admin-properties"]["properties"]["db_host"]
+        ranger_db_privelege_url_dict = {
+          'MYSQL': {'ranger_privelege_user_jdbc_url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost)},
+          'ORACLE': {'ranger_privelege_user_jdbc_url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerDbFlavor, rangerDbHost, None)},
+          'POSTGRES': {'ranger_privelege_user_jdbc_url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/postgres'},
+          'MSSQL': {'ranger_privelege_user_jdbc_url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';'},
+          'SQLA': {'ranger_privelege_user_jdbc_url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';'}
+        }
+        rangerPrivelegeDbProperties = ranger_db_privelege_url_dict.get(rangerDbFlavor, ranger_db_privelege_url_dict['MYSQL'])
+        for key in rangerPrivelegeDbProperties:
+          putRangerEnvProperty(key, rangerPrivelegeDbProperties.get(key))
+
+    # Recommend ldap settings based on ambari.properties configuration
+    if 'ambari-server-properties' in services and \
+        'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+      serverProperties = services['ambari-server-properties']
+      if 'authentication.ldap.baseDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.searchBase', serverProperties['authentication.ldap.baseDn'])
+      if 'authentication.ldap.groupMembershipAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.memberattributename', serverProperties['authentication.ldap.groupMembershipAttr'])
+      if 'authentication.ldap.groupNamingAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.nameattribute', serverProperties['authentication.ldap.groupNamingAttr'])
+      if 'authentication.ldap.groupObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.objectclass', serverProperties['authentication.ldap.groupObjectClass'])
+      if 'authentication.ldap.managerDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.binddn', serverProperties['authentication.ldap.managerDn'])
+      if 'authentication.ldap.primaryUrl' in serverProperties:
+        ldap_protocol =  'ldap://'
+        if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
+          ldap_protocol =  'ldaps://'
+        ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
+        putRangerUgsyncSite('ranger.usersync.ldap.url', ldapUrl)
+      if 'authentication.ldap.userObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.objectclass', serverProperties['authentication.ldap.userObjectClass'])
+      if 'authentication.ldap.usernameAttribute' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.nameattribute', serverProperties['authentication.ldap.usernameAttribute'])
+
+
+    # Recommend Ranger Authentication method
+    authMap = {
+      'org.apache.ranger.unixusersync.process.UnixUserGroupBuilder': 'UNIX',
+      'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder': 'LDAP'
+    }
+
+    if 'ranger-ugsync-site' in services['configurations'] and 'ranger.usersync.source.impl.class' in services['configurations']["ranger-ugsync-site"]["properties"]:
+      rangerUserSyncClass = services['configurations']["ranger-ugsync-site"]["properties"]["ranger.usersync.source.impl.class"]
+      if rangerUserSyncClass in authMap:
+        rangerSqlConnectorProperty = authMap.get(rangerUserSyncClass)
+        putRangerAdminProperty('ranger.authentication.method', rangerSqlConnectorProperty)
+
+
+    if 'ranger-env' in services['configurations'] and 'is_solrCloud_enabled' in services['configurations']["ranger-env"]["properties"]:
+      isSolrCloudEnabled = services['configurations']["ranger-env"]["properties"]["is_solrCloud_enabled"]  == "true"
+    else:
+      isSolrCloudEnabled = False
+
+    if isSolrCloudEnabled:
+      zookeeper_host_port = self.getZKHostPortString(services)
+      ranger_audit_zk_port = ''
+      if zookeeper_host_port:
+        ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits')
+        putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+    else:
+      putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE')
+
+    # Recommend ranger.audit.solr.zookeepers and xasecure.audit.destination.hdfs.dir
+    include_hdfs = "HDFS" in servicesList
+    if include_hdfs:
+      if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+        default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+        putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit'))
+
+    # Recommend audit properties for Ranger-supported services
+    ranger_services = [
+      {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-audit'},
+      {'service_name': 'YARN', 'audit_file': 'ranger-yarn-audit'},
+      {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-audit'},
+      {'service_name': 'HIVE', 'audit_file': 'ranger-hive-audit'},
+      {'service_name': 'KNOX', 'audit_file': 'ranger-knox-audit'},
+      {'service_name': 'KAFKA', 'audit_file': 'ranger-kafka-audit'},
+      {'service_name': 'STORM', 'audit_file': 'ranger-storm-audit'}
+    ]
+
+    for item in range(len(ranger_services)):
+      if ranger_services[item]['service_name'] in servicesList:
+        component_audit_file =  ranger_services[item]['audit_file']
+        if component_audit_file in services["configurations"]:
+          ranger_audit_dict = [
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'xasecure.audit.destination.db'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'xasecure.audit.destination.hdfs'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'xasecure.audit.destination.hdfs.dir'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.solr', 'target_configname': 'xasecure.audit.destination.solr'},
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.urls', 'target_configname': 'xasecure.audit.destination.solr.urls'},
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.zookeepers', 'target_configname': 'xasecure.audit.destination.solr.zookeepers'}
+          ]
+          putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+          # use a distinct variable name so the outer loop index "item" is not shadowed
+          for audit_item in ranger_audit_dict:
+            if audit_item['filename'] in services["configurations"] and audit_item['configname'] in services["configurations"][audit_item['filename']]["properties"]:
+              if audit_item['filename'] in configurations and audit_item['configname'] in configurations[audit_item['filename']]["properties"]:
+                rangerAuditProperty = configurations[audit_item['filename']]["properties"][audit_item['configname']]
+              else:
+                rangerAuditProperty = services["configurations"][audit_item['filename']]["properties"][audit_item['configname']]
+              putRangerAuditProperty(audit_item['target_configname'], rangerAuditProperty)
+
+    audit_solr_flag = 'false'
+    audit_db_flag = 'false'
+    ranger_audit_source_type = 'solr'
+    if 'ranger-env' in services['configurations'] and 'xasecure.audit.destination.solr' in services['configurations']["ranger-env"]["properties"]:
+      audit_solr_flag = services['configurations']["ranger-env"]["properties"]['xasecure.audit.destination.solr']
+    if 'ranger-env' in services['configurations'] and 'xasecure.audit.destination.db' in services['configurations']["ranger-env"]["properties"]:
+      audit_db_flag = services['configurations']["ranger-env"]["properties"]['xasecure.audit.destination.db']
+
+    if audit_db_flag == 'true' and audit_solr_flag == 'false':
+      ranger_audit_source_type = 'db'
+    putRangerAdminProperty('ranger.audit.source.type',ranger_audit_source_type)
+
+    knox_host = 'localhost'
+    knox_port = '8443'
+    if 'KNOX' in servicesList:
+      knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
+      if len(knox_hosts) > 0:
+        knox_hosts.sort()
+        knox_host = knox_hosts[0]
+      if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
+        knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
+      putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
+
+    required_services = [
+      {'service_name': 'HDFS', 'config_type': 'ranger-hdfs-security'},
+      {'service_name': 'YARN', 'config_type': 'ranger-yarn-security'},
+      {'service_name': 'HBASE', 'config_type': 'ranger-hbase-security'},
+      {'service_name': 'HIVE', 'config_type': 'ranger-hive-security'},
+      {'service_name': 'KNOX', 'config_type': 'ranger-knox-security'},
+      {'service_name': 'KAFKA', 'config_type': 'ranger-kafka-security'},
+      {'service_name': 'RANGER_KMS','config_type': 'ranger-kms-security'},
+      {'service_name': 'STORM', 'config_type': 'ranger-storm-security'}
+    ]
+
+    # Recommend the Ranger policy REST URL for Ranger-supported plugins
+    self.recommendRangerUrlConfigurations(configurations, services, required_services)
+
+
+  def recommendRangerConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    has_ranger_tagsync = False
+
+    putTagsyncAppProperty = self.putProperty(configurations, "tagsync-application-properties", services)
+    putTagsyncSiteProperty = self.putProperty(configurations, "ranger-tagsync-site", services)
+    putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
+    putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+
+    application_properties = self.getServicesSiteProperties(services, "application-properties")
+
+    ranger_tagsync_host = self.getHostsForComponent(services, "RANGER", "RANGER_TAGSYNC")
+    has_ranger_tagsync = len(ranger_tagsync_host) > 0
+
+    if 'ATLAS' in servicesList and has_ranger_tagsync:
+      atlas_hosts = self.getHostNamesWithComponent("ATLAS", "ATLAS_SERVER", services)
+      atlas_host = 'localhost' if len(atlas_hosts) == 0 else atlas_hosts[0]
+      protocol = 'http'
+      atlas_port = '21000'
+
+      if application_properties and 'atlas.enableTLS' in application_properties and application_properties['atlas.enableTLS'].lower() == 'true':
+        protocol = 'https'
+        if 'atlas.server.https.port' in application_properties:
+          atlas_port = application_properties['atlas.server.https.port']
+      else:
+        protocol = 'http'
+        if application_properties and 'atlas.server.http.port' in application_properties:
+          atlas_port = application_properties['atlas.server.http.port']
+
+      atlas_rest_endpoint = '{0}://{1}:{2}'.format(protocol, atlas_host, atlas_port)
+
+      putTagsyncSiteProperty('ranger.tagsync.source.atlas', 'true')
+      putTagsyncSiteProperty('ranger.tagsync.source.atlasrest.endpoint', atlas_rest_endpoint)
+
+    zookeeper_host_port = self.getZKHostPortString(services)
+    if zookeeper_host_port and has_ranger_tagsync:
+      putTagsyncAppProperty('atlas.kafka.zookeeper.connect', zookeeper_host_port)
+
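+    # Tagsync consumes Atlas entity-change notifications from Kafka, so recommend the broker bootstrap list.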
+    if 'KAFKA' in servicesList and has_ranger_tagsync:
+      kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", services)
+      kafka_port = '6667'
+      if 'kafka-broker' in services['configurations'] and (
+          'port' in services['configurations']['kafka-broker']['properties']):
+        kafka_port = services['configurations']['kafka-broker']['properties']['port']
+      kafka_host_port = [host + ':' + kafka_port for host in kafka_hosts]
+
+      final_kafka_host = ",".join(kafka_host_port)
+      putTagsyncAppProperty('atlas.kafka.bootstrap.servers', final_kafka_host)
+
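+    # Decide where Ranger audits to Solr: the internal (Ambari Infra) SolrCloud, an external SolrCloud, or nowhere.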
+    is_solr_cloud_enabled = False
+    if 'ranger-env' in services['configurations'] and 'is_solrCloud_enabled' in services['configurations']['ranger-env']['properties']:
+      is_solr_cloud_enabled = services['configurations']['ranger-env']['properties']['is_solrCloud_enabled'] == 'true'
+
+    is_external_solr_cloud_enabled = False
+    if 'ranger-env' in services['configurations'] and 'is_external_solrCloud_enabled' in services['configurations']['ranger-env']['properties']:
+      is_external_solr_cloud_enabled = services['configurations']['ranger-env']['properties']['is_external_solrCloud_enabled'] == 'true'
+
+    ranger_audit_zk_port = ''
+
+    if 'AMBARI_INFRA' in servicesList and zookeeper_host_port and is_solr_cloud_enabled and not is_external_solr_cloud_enabled:
+      zookeeper_host_port = zookeeper_host_port.split(',')
+      zookeeper_host_port.sort()
+      zookeeper_host_port = ",".join(zookeeper_host_port)
+      infra_solr_znode = '/infra-solr'
+
+      if 'infra-solr-env' in services['configurations'] and \
+        ('infra_solr_znode' in services['configurations']['infra-solr-env']['properties']):
+        infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode']
+      # Fall back to the default '/infra-solr' znode when infra-solr-env does not override it,
+      # so the recommendation is never an empty string.
+      ranger_audit_zk_port = '{0}{1}'.format(zookeeper_host_port, infra_solr_znode)
+      putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+    elif zookeeper_host_port and is_solr_cloud_enabled and is_external_solr_cloud_enabled:
+      ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits')
+      putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+    else:
+      putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE')
+
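+    # Propagate the Solr audit URL/ZooKeeper recommendations from ranger-admin-site into each enabled plugin's audit config.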
+    ranger_services = [
+      {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-audit'},
+      {'service_name': 'YARN', 'audit_file': 'ranger-yarn-audit'},
+      {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-audit'},
+      {'service_name': 'HIVE', 'audit_file': 'ranger-hive-audit'},
+      {'service_name': 'KNOX', 'audit_file': 'ranger-knox-audit'},
+      {'service_name': 'KAFKA', 'audit_file': 'ranger-kafka-audit'},
+      {'service_name': 'STORM', 'audit_file': 'ranger-storm-audit'},
+      {'service_name': 'RANGER_KMS', 'audit_file': 'ranger-kms-audit'},
+      {'service_name': 'ATLAS', 'audit_file': 'ranger-atlas-audit'}
+    ]
+
+    for ranger_service in ranger_services:
+      if ranger_service['service_name'] in servicesList:
+        component_audit_file = ranger_service['audit_file']
+        if component_audit_file in services["configurations"]:
+          ranger_audit_dict = [
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.urls', 'target_configname': 'xasecure.audit.destination.solr.urls'},
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.zookeepers', 'target_configname': 'xasecure.audit.destination.solr.zookeepers'}
+          ]
+          putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+          for item in ranger_audit_dict:
+            if item['filename'] in services["configurations"] and item['configname'] in services["configurations"][item['filename']]["properties"]:
+              if item['filename'] in configurations and item['configname'] in configurations[item['filename']]["properties"]:
+                rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
+              else:
+                rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
+              putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
+
+    if "HDFS" in servicesList:
+      hdfs_user = None
+      if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
+        hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
+        putRangerAdminProperty('ranger.kms.service.user.hdfs', hdfs_user)
+
+    if "HIVE" in servicesList:
+      hive_user = None
+      if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"]:
+        hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
+        putRangerAdminProperty('ranger.kms.service.user.hive', hive_user)
+
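+    # Surface each plugin's service user in ranger-admin-site so Ranger Admin knows which users the plugins run as.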
+    ranger_plugins_serviceuser = [
+      {'service_name': 'HDFS', 'file_name': 'hadoop-env', 'config_name': 'hdfs_user', 'target_configname': 'ranger.plugins.hdfs.serviceuser'},
+      {'service_name': 'HIVE', 'file_name': 'hive-env', 'config_name': 'hive_user', 'target_configname': 'ranger.plugins.hive.serviceuser'},
+      {'service_name': 'YARN', 'file_name': 'yarn-env', 'config_name': 'yarn_user', 'target_configname': 'ranger.plugins.yarn.serviceuser'},
+      {'service_name': 'HBASE', 'file_name': 'hbase-env', 'config_name': 'hbase_user', 'target_configname': 'ranger.plugins.hbase.serviceuser'},
+      {'service_name': 'KNOX', 'file_name': 'knox-env', 'config_name': 'knox_user', 'target_configname': 'ranger.plugins.knox.serviceuser'},
+      {'service_name': 'STORM', 'file_name': 'storm-env', 'config_name': 'storm_user', 'target_configname': 'ranger.plugins.storm.serviceuser'},
+      {'service_name': 'KAFKA', 'file_name': 'kafka-env', 'config_name': 'kafka_user', 'target_configname': 'ranger.plugins.kafka.serviceuser'},
+      {'service_name': 'RANGER_KMS', 'file_name': 'kms-env', 'config_name': 'kms_user', 'target_configname': 'ranger.plugins.kms.serviceuser'},
+      {'service_name': 'ATLAS', 'file_name': 'atlas-env', 'config_name': 'metadata_user', 'target_configname': 'ranger.plugins.atlas.serviceuser'}
+    ]
+
+    for plugin in ranger_plugins_serviceuser:
+      if plugin['service_name'] in servicesList:
+        file_name = plugin['file_name']
+        config_name = plugin['config_name']
+        target_configname = plugin['target_configname']
+        if file_name in services["configurations"] and config_name in services["configurations"][file_name]["properties"]:
+          service_user = services["configurations"][file_name]["properties"][config_name]
+          putRangerAdminProperty(target_configname, service_user)
+
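+    # Mirror the cluster-wide ranger-env audit destinations into ranger-atlas-audit, preferring freshly recommended values.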
+    if "ATLAS" in servicesList:
+      if "ranger-env" in services["configurations"]:
+        putAtlasRangerAuditProperty = self.putProperty(configurations, 'ranger-atlas-audit', services)
+        xasecure_audit_destination_hdfs = ''
+        xasecure_audit_destination_hdfs_dir = ''
+        xasecure_audit_destination_solr = ''
+        if 'xasecure.audit.destination.hdfs' in configurations['ranger-env']['properties']:
+          xasecure_audit_destination_hdfs = configurations['ranger-env']['properties']['xasecure.audit.destination.hdfs']
+        else:
+          xasecure_audit_destination_hdfs = services['configurations']['ranger-env']['properties']['xasecure.audit.destination.hdfs']
+
+        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+          xasecure_audit_destination_hdfs_dir = '{0}/{1}/{2}'.format(services['configurations']['core-site']['properties']['fs.defaultFS'], 'ranger', 'audit')
+
+        if 'xasecure.audit.destination.solr' in configurations['ranger-env']['properties']:
+          xasecure_audit_destination_solr = configurations['ranger-env']['properties']['xasecure.audit.destination.solr']
+        else:
+          xasecure_audit_destination_solr = services['configurations']['ranger-env']['properties']['xasecure.audit.destination.solr']
+
+        putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs', xasecure_audit_destination_hdfs)
+        putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs.dir', xasecure_audit_destination_hdfs_dir)
+        putAtlasRangerAuditProperty('xasecure.audit.destination.solr', xasecure_audit_destination_solr)
+
+    required_services = [
+      {'service_name': 'ATLAS', 'config_type': 'ranger-atlas-security'}
+    ]
+
+    # recommendation for ranger url for ranger-supported plugins
+    self.recommendRangerUrlConfigurations(configurations, services, required_services)
+
+
+  def recommendRangerConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
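+    """Recommendations introduced in HDP 2.6: keep group search aligned with LDAP delta sync."""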
+
+    putRangerUgsyncSite = self.putProperty(configurations, 'ranger-ugsync-site', services)
+
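+    # LDAP delta sync relies on group search, so the two flags are kept in step.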
+    delta_sync_enabled = False
+    if 'ranger-ugsync-site' in services['configurations'] and 'ranger.usersync.ldap.deltasync' in services['configurations']['ranger-ugsync-site']['properties']:
+      delta_sync_enabled = services['configurations']['ranger-ugsync-site']['properties']['ranger.usersync.ldap.deltasync'] == "true"
+
+    if delta_sync_enabled:
+      putRangerUgsyncSite("ranger.usersync.group.searchenabled", "true")
+    else:
+      putRangerUgsyncSite("ranger.usersync.group.searchenabled", "false")
+
+
+class RangerValidator(service_advisor.ServiceAdvisor):
+  """
+  Ranger Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
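+    # Each entry maps a config type to its validator; entries are grouped by the stack version that introduced the check.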
+    self.validators = [("ranger-env", self.validateRangerConfigurationsEnvFromHDP22),
+                       ("admin-properties", self.validateRangerAdminConfigurationsFromHDP23),
+                       ("ranger-env", self.validateRangerConfigurationsEnvFromHDP23),
+                       ("ranger-tagsync-site", self.validateRangerTagsyncConfigurationsFromHDP25),
+                       ("ranger-ugsync-site", self.validateRangerUsersyncConfigurationsFromHDP26)]
+
+  def validateRangerConfigurationsEnvFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_env_properties = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "ranger-storm-plugin-enabled" in ranger_env_properties and ranger_env_properties['ranger-storm-plugin-enabled'].lower() == 'yes' and not 'KERBEROS' in servicesList:
+      validationItems.append({"config-name": "ranger-storm-plugin-enabled",
+                              "item": self.getWarnItem("Ranger Storm plugin should not be enabled in non-kerberos environment.")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-env")
+
+  def validateRangerAdminConfigurationsFromHDP23(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'RANGER' in servicesList and 'policymgr_external_url' in ranger_site:
+      policymgr_mgr_url = ranger_site['policymgr_external_url']
+      if policymgr_mgr_url.endswith('/'):
+        validationItems.append({'config-name':'policymgr_external_url',
+                                'item':self.getWarnItem('Ranger External URL should not contain trailing slash "/"')})
+    return self.toConfigurationValidationProblems(validationItems,'admin-properties')
+
+
+  def validateRangerConfigurationsEnvFromHDP23(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_env_properties = properties
+    validationItems = []
+    security_enabled = self.isSecurityEnabled(services)
+
+    if "ranger-kafka-plugin-enabled" in ranger_env_properties and ranger_env_properties["ranger-kafka-plugin-enabled"].lower() == 'yes' and not security_enabled:
+      validationItems.append({"config-name": "ranger-kafka-plugin-enabled",
+                              "item": self.getWarnItem(
+                                "Ranger Kafka plugin should not be enabled in non-kerberos environment.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "ranger-env")
+    return validationProblems
+
+
+  def validateRangerTagsyncConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_tagsync_properties = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    atlas_missing = False
+    if "RANGER" in servicesList:
+      atlas_missing = "ATLAS" not in servicesList
+
+      if atlas_missing and 'ranger.tagsync.source.atlas' in ranger_tagsync_properties and \
+          ranger_tagsync_properties['ranger.tagsync.source.atlas'].lower() == 'true':
+        validationItems.append({"config-name": "ranger.tagsync.source.atlas",
+                                "item": self.getWarnItem(
+                                  "The ATLAS service must be installed before ranger.tagsync.source.atlas can be set to true.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "ranger-tagsync-site")
+
+  def validateRangerUsersyncConfigurationsFromHDP26(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_usersync_properties = properties
+    validationItems = []
+
+    delta_sync_enabled = 'ranger.usersync.ldap.deltasync' in ranger_usersync_properties \
+      and ranger_usersync_properties['ranger.usersync.ldap.deltasync'].lower() == 'true'
+    group_sync_enabled = 'ranger.usersync.group.searchenabled' in ranger_usersync_properties \
+      and ranger_usersync_properties['ranger.usersync.group.searchenabled'].lower() == 'true'
+    usersync_source_ldap_enabled = 'ranger.usersync.source.impl.class' in ranger_usersync_properties \
+      and ranger_usersync_properties['ranger.usersync.source.impl.class'] == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder'
+
+    if usersync_source_ldap_enabled and delta_sync_enabled and not group_sync_enabled:
+      validationItems.append({"config-name": "ranger.usersync.group.searchenabled",
+                              "item": self.getWarnItem(
+                                "ranger.usersync.group.searchenabled should be set to true, since ranger.usersync.ldap.deltasync is enabled.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "ranger-ugsync-site")
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_1.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_1.json
new file mode 100644
index 0000000..e6724cd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_1.json
@@ -0,0 +1,722 @@
+{
+  "name": "default",
+  "description": "Default theme for Ranger service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "ranger_admin_settings",
+            "display-name": "Ranger Admin",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-admin",
+                  "display-name": "Ranger Admin",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "3",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "3",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-db-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-root-user-col1",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-root-user-col2",
+                      "row-index": "1",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_user_info",
+            "display-name": "Ranger User Info",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-user-info",
+                  "display-name": "Ranger User Info",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-user-row2-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "subsection-tabs": [
+                        {
+                          "name": "ldap-common-configs",
+                          "display-name": "Common Configs",
+                          "depends-on": [
+                            {
+                              "configs": [
+                                "usersync-properties/SYNC_SOURCE"
+                              ],
+                              "if": "${usersync-properties/SYNC_SOURCE} === ldap",
+                              "then": {
+                                "property_value_attributes": {
+                                  "visible": true
+                                }
+                              },
+                              "else": {
+                                "property_value_attributes": {
+                                  "visible": false
+                                }
+                              }
+                            }
+                          ]
+                        },
+                        {
+                          "name": "ldap-user-configs",
+                          "display-name": "User Configs",
+                          "depends-on": [
+                            {
+                              "configs": [
+                                "usersync-properties/SYNC_SOURCE"
+                              ],
+                              "if": "${usersync-properties/SYNC_SOURCE} === ldap",
+                              "then": {
+                                "property_value_attributes": {
+                                  "visible": true
+                                }
+                              },
+                              "else": {
+                                "property_value_attributes": {
+                                  "visible": false
+                                }
+                              }
+                            }
+                          ]
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_plugin",
+            "display-name": "Ranger Plugin",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-ranger-plugin",
+                  "display-name": "Ranger Plugin",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "section-ranger-plugin-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "section-ranger-plugin-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "section-ranger-plugin-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_audit_settings",
+            "display-name": "Ranger Audit",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-audit-hdfs",
+                  "display-name": "Audit to HDFS",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-hdfs-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-ranger-audit-db",
+                  "display-name": "Audit to DB",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-audit-db-row2-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-audit-db-row2-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "admin-properties/DB_FLAVOR",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_name",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_user",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_host",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.driver",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "admin-properties/db_password",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "admin-properties/db_root_user",
+          "subsection-name": "subsection-ranger-db-root-user-col1"
+        },
+        {
+          "config": "admin-properties/db_root_password",
+          "subsection-name": "subsection-ranger-db-root-user-col2"
+        },
+        {
+          "config": "usersync-properties/SYNC_SOURCE",
+          "subsection-name": "subsection-ranger-user-row2-col1"
+        },
+        {
+          "config": "usersync-properties/MIN_UNIX_USER_ID_TO_SYNC",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "usersync-properties/SYNC_SOURCE"
+              ],
+              "if": "${usersync-properties/SYNC_SOURCE} === unix",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_URL",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "ranger-env/bind_anonymous",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_BIND_DN",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/bind_anonymous"
+              ],
+              "if": "${ranger-env/bind_anonymous}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_BIND_PASSWORD",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/bind_anonymous"
+              ],
+              "if": "${ranger-env/bind_anonymous}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_NAME_ATTRIBUTE",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_OBJECT_CLASS",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_BASE",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_FILTER",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_SCOPE",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "usersync-properties/SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-env/ranger-hdfs-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HDFS",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-hive-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HIVE",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-hbase-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col2",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HBASE",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-storm-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col2",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "STORM",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-knox-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col3",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "KNOX",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs",
+          "subsection-name": "subsection-ranger-hdfs-row1-col1"
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+          "subsection-name": "subsection-ranger-hdfs-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.hdfs"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.hdfs}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.db",
+          "subsection-name": "subsection-ranger-audit-db-row2-col1"
+        },
+        {
+          "config": "admin-properties/audit_db_user",
+          "subsection-name": "subsection-ranger-audit-db-row2-col1"
+        },
+        {
+          "config": "admin-properties/audit_db_name",
+          "subsection-name": "subsection-ranger-audit-db-row2-col2"
+        },
+        {
+          "config": "admin-properties/audit_db_password",
+          "subsection-name": "subsection-ranger-audit-db-row2-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "admin-properties/DB_FLAVOR",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "admin-properties/db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_host",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_SOURCE",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "usersync-properties/MIN_UNIX_USER_ID_TO_SYNC",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_URL",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-env/bind_anonymous",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_BIND_DN",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_BIND_PASSWORD",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_NAME_ATTRIBUTE",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_OBJECT_CLASS",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_BASE",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_FILTER",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_SEARCH_SCOPE",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "usersync-properties/SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hdfs-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hive-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hbase-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-knox-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-storm-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.db",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_password",
+        "widget": {
+          "type": "password"
+        }
+      }
+    ]
+  }
+}
+
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_2.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_2.json
new file mode 100644
index 0000000..cbd27e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_2.json
@@ -0,0 +1,1470 @@
+{
+  "name": "default",
+  "description": "Default theme for Ranger service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "ranger_admin_settings",
+            "display-name": "Ranger Admin",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-admin",
+                  "display-name": "Ranger Admin",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "3",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "3",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-db-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-db-row2",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "2"
+                    },
+                    {
+                      "name": "subsection-ranger-db-root-user-col1",
+                      "row-index": "2",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "ranger-env/create_db_dbuser"
+                          ],
+                          "if": "${ranger-env/create_db_dbuser}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    },
+                    {
+                      "name": "subsection-ranger-db-root-user-col2",
+                      "row-index": "2",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "ranger-env/create_db_dbuser"
+                          ],
+                          "if": "${ranger-env/create_db_dbuser}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_user_info",
+            "display-name": "Ranger User Info",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-user-info",
+                  "display-name": "Ranger User Info",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-user-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-user-row2-col1",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "subsection-tabs": [
+                        {
+                          "name": "ldap-common-configs",
+                          "display-name": "Common Configs",
+                          "depends-on": [
+                            {
+                              "configs": [
+                                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+                              ],
+                              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder",
+                              "then": {
+                                "property_value_attributes": {
+                                  "visible": true
+                                }
+                              },
+                              "else": {
+                                "property_value_attributes": {
+                                  "visible": false
+                                }
+                              }
+                            }
+                          ]
+                        },
+                        {
+                          "name": "ldap-user-configs",
+                          "display-name": "User Configs",
+                          "depends-on": [
+                            {
+                              "configs": [
+                                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+                              ],
+                              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder",
+                              "then": {
+                                "property_value_attributes": {
+                                  "visible": true
+                                }
+                              },
+                              "else": {
+                                "property_value_attributes": {
+                                  "visible": false
+                                }
+                              }
+                            }
+                          ]
+                        },
+                        {
+                          "name": "ldap-group-configs",
+                          "display-name": "Group Configs",
+                          "depends-on": [
+                            {
+                              "configs": [
+                                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+                              ],
+                              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder",
+                              "then": {
+                                "property_value_attributes": {
+                                  "visible": true
+                                }
+                              },
+                              "else": {
+                                "property_value_attributes": {
+                                  "visible": false
+                                }
+                              }
+                            }
+                          ]
+                        }
+                      ],
+                      "depends-on": [
+                        {
+                          "configs": [
+                            "ranger-ugsync-site/ranger.usersync.enabled"
+                          ],
+                          "if": "${ranger-ugsync-site/ranger.usersync.enabled}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_plugin",
+            "display-name": "Ranger Plugin",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-ranger-plugin",
+                  "display-name": "Ranger Plugin",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "section-ranger-plugin-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "section-ranger-plugin-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "section-ranger-plugin-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_audit_settings",
+            "display-name": "Ranger Audit",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-audit-solr",
+                  "display-name": "Audit to Solr",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-solr-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-ranger-audit-hdfs",
+                  "display-name": "Audit to HDFS",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-hdfs-row1-col2",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-ranger-audit-db",
+                  "display-name": "Audit to DB",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-audit-db-row2-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-ranger-audit-db-row2-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "admin-properties/DB_FLAVOR",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_name",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_user",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+          "subsection-name": "subsection-ranger-db-row1-col1"
+        },
+        {
+          "config": "admin-properties/db_host",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "ranger-admin-site/ranger.jpa.jdbc.driver",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "admin-properties/db_password",
+          "subsection-name": "subsection-ranger-db-row1-col2"
+        },
+        {
+          "config": "ranger-env/test_db_connection",
+          "subsection-name": "subsection-ranger-db-row2",
+          "property_value_attributes": {
+            "ui_only_property": true
+          },
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/create_db_dbuser"
+              ],
+              "if": "${ranger-env/create_db_dbuser}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/create_db_dbuser",
+          "subsection-name": "subsection-ranger-db-row2"
+        },
+        {
+          "config": "admin-properties/db_root_user",
+          "subsection-name": "subsection-ranger-db-root-user-col1"
+        },
+        {
+          "config": "ranger-env/ranger_privelege_user_jdbc_url",
+          "subsection-name": "subsection-ranger-db-root-user-col1"
+        },
+        {
+          "config": "admin-properties/db_root_password",
+          "subsection-name": "subsection-ranger-db-root-user-col2"
+        },
+        {
+          "config": "ranger-env/test_root_db_connection",
+          "subsection-name": "subsection-ranger-db-root-user-col1",
+          "property_value_attributes": {
+            "ui_only_property": true
+          }
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.enabled",
+          "subsection-name": "subsection-ranger-user-row1-col1"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.source.impl.class",
+          "subsection-name": "subsection-ranger-user-row2-col1"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.unix.minUserId",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.unix.password.file",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.unix.group.file",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.filesource.file",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.filesource.text.delimiter",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.source.impl.class"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.source.impl.class} === org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.url",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "ranger-env/bind_anonymous",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.binddn",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/bind_anonymous"
+              ],
+              "if": "${ranger-env/bind_anonymous}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.ldapbindpassword",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/bind_anonymous"
+              ],
+              "if": "${ranger-env/bind_anonymous}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.nameattribute",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.objectclass",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchbase",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchfilter",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchscope",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.user.groupnameattribute",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.usermapsyncenabled",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.searchenabled",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs"
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.memberattributename",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.nameattribute",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.objectclass",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.searchbase",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.searchfilter",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-hdfs-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HDFS",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-yarn-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "YARN",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-hive-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HIVE",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-hbase-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col2",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "HBASE",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-storm-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col2",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "STORM",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-knox-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col3",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "KNOX",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/ranger-kafka-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col3",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "KAFKA",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.db",
+          "subsection-name": "subsection-ranger-audit-db-row2-col1"
+        },
+        {
+          "config": "admin-properties/audit_db_user",
+          "subsection-name": "subsection-ranger-audit-db-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.db"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.db}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "admin-properties/audit_db_name",
+          "subsection-name": "subsection-ranger-audit-db-row2-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.db"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.db}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "admin-properties/audit_db_password",
+          "subsection-name": "subsection-ranger-audit-db-row2-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.db"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.db}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.solr",
+          "subsection-name": "subsection-ranger-solr-row1-col1"
+        },
+        {
+          "config": "ranger-env/is_solrCloud_enabled",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.urls",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/is_solrCloud_enabled",
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/is_solrCloud_enabled} === false && ${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.zookeepers",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/is_solrCloud_enabled",
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/is_solrCloud_enabled} && ${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.username",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.password",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs",
+          "subsection-name": "subsection-ranger-hdfs-row1-col2"
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+          "subsection-name": "subsection-ranger-hdfs-row1-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.hdfs"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.hdfs}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "admin-properties/DB_FLAVOR",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "admin-properties/db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.jpa.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.jpa.jdbc.driver",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_host",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/test_db_connection",
+        "widget": {
+          "type": "test-db-connection",
+          "display-name": "Test Connection",
+          "required-properties": {
+            "jdbc.driver.class": "ranger-admin-site/ranger.jpa.jdbc.driver",
+            "jdbc.driver.url": "ranger-admin-site/ranger.jpa.jdbc.url",
+            "db.connection.source.host": "ranger-site/ranger_admin_hosts",
+            "db.type": "admin-properties/DB_FLAVOR",
+            "db.connection.destination.host": "admin-properties/db_host",
+            "db.connection.user": "admin-properties/db_user",
+            "db.connection.password": "admin-properties/db_password"
+          }
+        }
+      },
+      {
+        "config": "ranger-env/create_db_dbuser",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger_privelege_user_jdbc_url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/db_root_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/test_root_db_connection",
+        "widget": {
+          "type": "test-db-connection",
+          "display-name": "Test Connection",
+          "required-properties": {
+            "jdbc.driver.class": "ranger-admin-site/ranger.jpa.jdbc.driver",
+            "jdbc.driver.url": "ranger-env/ranger_privelege_user_jdbc_url",
+            "db.connection.source.host": "ranger-site/ranger_admin_hosts",
+            "db.type": "admin-properties/DB_FLAVOR",
+            "db.connection.destination.host": "admin-properties/db_host",
+            "db.connection.user": "admin-properties/db_root_user",
+            "db.connection.password": "admin-properties/db_root_password"
+          }
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.source.impl.class",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-env/bind_anonymous",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.binddn",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.ldapbindpassword",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.usermapsyncenabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.nameattribute",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.objectclass",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchbase",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchfilter",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.searchscope",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.user.groupnameattribute",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.searchenabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.memberattributename",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.nameattribute",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.objectclass",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.searchbase",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.searchfilter",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.unix.minUserId",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.unix.password.file",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.unix.group.file",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.filesource.file",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.filesource.text.delimiter",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hdfs-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hive-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-hbase-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-kafka-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-knox-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-storm-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-yarn-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.db",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "admin-properties/audit_db_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.solr",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/is_solrCloud_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.urls",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.zookeepers",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.username",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
+
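Every `depends-on` stanza in the theme file above follows one pattern: `if` is a condition over other config values written with `${config-type/property-name}` placeholders, and the `then`/`else` branches carry the `property_value_attributes` (here always `visible`) to apply. A minimal sketch of how a client could evaluate such a condition — the helper name and the simplified grammar (conjunctions of truthy checks and `===` comparisons) are assumptions, not Ambari Web's actual evaluator:

```javascript
// Minimal sketch: evaluate one "depends-on" entry from an Ambari theme.
// evaluateDependsOn is a hypothetical helper; the real evaluator in
// Ambari Web is more general than the grammar handled here.
function evaluateDependsOn(dependency, configValues) {
  // Conditions in these themes are '&&'-joined terms, each either
  // "${type/name}" (truthy check) or "${type/name} === literal".
  var matched = dependency.if.split('&&').every(function (term) {
    var parts = term.split('===').map(function (s) { return s.trim(); });
    var key = parts[0].replace(/^\$\{/, '').replace(/\}$/, '');
    var value = configValues[key];
    if (parts.length === 1) {
      return value === true || value === 'true'; // bare placeholder: truthy check
    }
    return String(value) === parts[1]; // comparison against a literal
  });
  var branch = matched ? dependency.then : dependency.else;
  return branch.property_value_attributes; // e.g. { "visible": false }
}

// Example: hide admin-properties/db_root_user when Ambari creates the DB user.
var attrs = evaluateDependsOn(
  {
    "if": "${ranger-env/create_db_dbuser}",
    "then": { "property_value_attributes": { "visible": false } },
    "else": { "property_value_attributes": { "visible": true } }
  },
  { "ranger-env/create_db_dbuser": true }
);
// attrs.visible === false
```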
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_3.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_3.json
new file mode 100644
index 0000000..cbe28a3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_3.json
@@ -0,0 +1,692 @@
+{
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "ranger_audit_settings",
+            "display-name": "Ranger Audit",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-ranger-audit-solr",
+                  "display-name": "Audit to Solr",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-solr-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-ranger-audit-hdfs",
+                  "display-name": "Audit to HDFS",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-hdfs-row1-col2",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          },
+          {
+            "name": "ranger_tagsync",
+            "display-name": "Ranger Tagsync",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-tagsync-atlas",
+                  "display-name": "Atlas Tag Source",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-tagsync-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-tagsync-atlasrest",
+                  "display-name": "AtlasRest Tag Source",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-tagsync-row1-col2",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-tagsync-file",
+                  "display-name": "File Tag Source",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-ranger-tagsync-row2-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "ranger-env/ranger-atlas-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col2",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "ATLAS",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.atlas",
+          "subsection-name": "subsection-ranger-tagsync-row1-col1"
+        },
+        {
+          "config": "tagsync-application-properties/atlas.kafka.bootstrap.servers",
+          "subsection-name": "subsection-ranger-tagsync-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.atlas"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.atlas}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "tagsync-application-properties/atlas.kafka.zookeeper.connect",
+          "subsection-name": "subsection-ranger-tagsync-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.atlas"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.atlas}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "tagsync-application-properties/atlas.kafka.entities.group.id",
+          "subsection-name": "subsection-ranger-tagsync-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.atlas"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.atlas}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest",
+          "subsection-name": "subsection-ranger-tagsync-row1-col2"
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest.endpoint",
+          "subsection-name": "subsection-ranger-tagsync-row1-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.atlasrest"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.atlasrest}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest.download.interval.millis",
+          "subsection-name": "subsection-ranger-tagsync-row1-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.atlasrest"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.atlasrest}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.file",
+          "subsection-name": "subsection-ranger-tagsync-row2-col1"
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.file.check.interval.millis",
+          "subsection-name": "subsection-ranger-tagsync-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.file"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.file}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-tagsync-site/ranger.tagsync.source.file.filename",
+          "subsection-name": "subsection-ranger-tagsync-row2-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-tagsync-site/ranger.tagsync.source.file"
+              ],
+              "if": "${ranger-tagsync-site/ranger.tagsync.source.file}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.user.searchenabled",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-user-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.search.first.enabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.search.first.enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.group.search.first.enabled",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-group-configs",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-ugsync-site/ranger.usersync.group.searchenabled"
+              ],
+              "if": "${ranger-ugsync-site/ranger.usersync.group.searchenabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.solr",
+          "subsection-name": "subsection-ranger-solr-row1-col1"
+        },
+        {
+          "config": "ranger-env/is_solrCloud_enabled",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/is_external_solrCloud_enabled",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr",
+                "ranger-env/is_solrCloud_enabled"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr} && ${ranger-env/is_solrCloud_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/is_external_solrCloud_kerberos",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr",
+                "ranger-env/is_solrCloud_enabled",
+                "ranger-env/is_external_solrCloud_enabled"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr} && ${ranger-env/is_solrCloud_enabled} && ${ranger-env/is_external_solrCloud_enabled}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.urls",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/is_solrCloud_enabled",
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/is_solrCloud_enabled} === false && ${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.zookeepers",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/is_solrCloud_enabled",
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/is_solrCloud_enabled} && ${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.username",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-admin-site/ranger.audit.solr.password",
+          "subsection-name": "subsection-ranger-solr-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.solr"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.solr}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs",
+          "subsection-name": "subsection-ranger-hdfs-row1-col2"
+        },
+        {
+          "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+          "subsection-name": "subsection-ranger-hdfs-row1-col2",
+          "depends-on": [
+            {
+              "configs":[
+                "ranger-env/xasecure.audit.destination.hdfs"
+              ],
+              "if": "${ranger-env/xasecure.audit.destination.hdfs}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.file.check.interval.millis",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.file.filename",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest.download.interval.millis",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest.endpoint",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "tagsync-application-properties/atlas.kafka.entities.group.id",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "tagsync-application-properties/atlas.kafka.bootstrap.servers",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "tagsync-application-properties/atlas.kafka.zookeeper.connect",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.atlas",
+        "widget": {
+          "type": "checkbox"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.atlasrest",
+        "widget": {
+          "type": "checkbox"
+        }
+      },
+      {
+        "config": "ranger-tagsync-site/ranger.tagsync.source.file",
+        "widget": {
+          "type": "checkbox"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-atlas-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.user.searchenabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.group.search.first.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.solr",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/is_solrCloud_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/is_external_solrCloud_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/is_external_solrCloud_kerberos",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.urls",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.zookeepers",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.username",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "ranger-admin-site/ranger.audit.solr.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/xasecure.audit.destination.hdfs.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
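theme_version_3.json also uses the second dependency flavor seen throughout these files: `"resource": "service"`, where `if` names a service (ATLAS here, HDFS/YARN/HIVE and the other plugins earlier) and visibility follows from whether that service is installed in the cluster. Continuing the sketch above under the same assumptions:

```javascript
// Sketch: a "resource": "service" dependency keys visibility off the set of
// installed services instead of another config value (assumed semantics).
function evaluateServiceDependsOn(dependency, installedServices) {
  var matched = installedServices.indexOf(dependency.if) !== -1;
  return (matched ? dependency.then : dependency.else).property_value_attributes;
}

// The ranger-atlas-plugin-enabled toggle stays hidden until ATLAS is installed:
// evaluateServiceDependsOn({ "if": "ATLAS", "then": ..., "else": ... },
//                          ["HDFS", "YARN"]) -> { visible: false }
```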
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_5.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_5.json
new file mode 100644
index 0000000..8068a38
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0.3.0/themes/theme_version_5.json
@@ -0,0 +1,48 @@
+{
+  "configuration": {
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "ranger-ugsync-site/ranger.usersync.ldap.deltasync",
+          "subsection-name": "subsection-ranger-user-row2-col1",
+          "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "ranger-env/ranger-nifi-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "NIFI",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "ranger-ugsync-site/ranger.usersync.ldap.deltasync",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "ranger-env/ranger-nifi-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
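Unlike theme_version_3.json above, this theme_version_5.json carries no `layouts` block, only `placement` and `widgets` — presumably it overlays the tabs and sections declared by the earlier theme versions. A sketch of that merge, under the assumption that later versions purely append entries:

```javascript
// Sketch: fold a later theme version onto a base theme by concatenating its
// placement and widget entries (assumed merge semantics, not Ambari's code).
function mergeThemeVersion(base, overlay) {
  var merged = JSON.parse(JSON.stringify(base)); // deep-copy the base theme
  merged.configuration.placement.configs = merged.configuration.placement.configs
    .concat(overlay.configuration.placement.configs);
  merged.configuration.widgets = merged.configuration.widgets
    .concat(overlay.configuration.widgets);
  return merged;
}
```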
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/configuration/ranger-env.xml b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/configuration/ranger-env.xml
index 661089a..627216e 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/configuration/ranger-env.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/configuration/ranger-env.xml
@@ -25,4 +25,26 @@
     <deleted>true</deleted>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>ranger-nifi-plugin-enabled</name>
+    <value>No</value>
+    <display-name>NIFI Ranger Plugin</display-name>
+    <description>Enable NIFI Ranger plugin</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Yes</value>
+          <label>ON</label>
+        </entry>
+        <entry>
+          <value>No</value>
+          <label>OFF</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
\ No newline at end of file
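The new `ranger-nifi-plugin-enabled` property follows the same convention as the other plugin flags: the persisted value is the string `Yes` or `No` (rendered as ON/OFF by the `toggle` widget registered in theme_version_5.json), so consumers have to normalize it to a boolean. A hypothetical helper:

```javascript
// Sketch: plugin-enabled flags are persisted as "Yes"/"No" strings, so a
// consumer normalizes before branching (hypothetical helper, not Ambari code).
function isPluginEnabled(configValues, key) {
  return String(configValues[key]).trim().toLowerCase() === 'yes';
}

// isPluginEnabled({ "ranger-env/ranger-nifi-plugin-enabled": "No" },
//                 "ranger-env/ranger-nifi-plugin-enabled") === false
```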
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/properties/ranger-solrconfig.xml.j2 b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/properties/ranger-solrconfig.xml.j2
index 34ce70d..25dbb7a 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/properties/ranger-solrconfig.xml.j2
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/properties/ranger-solrconfig.xml.j2
@@ -35,7 +35,7 @@
        that you fully re-index after changing this setting as it can
        affect both how text is indexed and queried.
   -->
-  <luceneMatchVersion>5.0.0</luceneMatchVersion>
+  <luceneMatchVersion>5.2.0</luceneMatchVersion>
 
   <!-- <lib/> directives can be used to instruct Solr to load any Jars
        identified and use them to resolve any "plugins" specified in
@@ -1055,13 +1055,6 @@
                   class="solr.DocumentAnalysisRequestHandler"
                   startup="lazy" />
 
-  <!-- Admin Handlers
-
-       Admin Handlers - This will register all the standard admin
-       RequestHandlers.
-    -->
-  <requestHandler name="/admin/"
-                  class="solr.admin.AdminHandlers" />
   <!-- This single handler is equivalent to the following... -->
   <!--
      <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/themes/theme_version_5.json b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/themes/theme_version_5.json
index 87fccbe..8068a38 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.7.0/themes/theme_version_5.json
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.7.0/themes/theme_version_5.json
@@ -7,6 +7,26 @@
           "config": "ranger-ugsync-site/ranger.usersync.ldap.deltasync",
           "subsection-name": "subsection-ranger-user-row2-col1",
           "subsection-tab-name": "ldap-common-configs"
+        },
+        {
+          "config": "ranger-env/ranger-nifi-plugin-enabled",
+          "subsection-name": "section-ranger-plugin-row1-col1",
+          "depends-on": [
+            {
+              "resource": "service",
+              "if": "NIFI",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
         }
       ]
     },
@@ -16,6 +36,12 @@
         "widget": {
           "type": "toggle"
         }
+      },
+      {
+        "config": "ranger-env/ranger-nifi-plugin-enabled",
+        "widget": {
+          "type": "toggle"
+        }
       }
     ]
   }
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-logsearch-conf.xml
deleted file mode 100644
index 226d2b0..0000000
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Ranger KMS</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>RANGER_KMS:ranger_kms</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"ranger_kms",
-      "rowtype":"service",
-      "path":"{{default('/configurations/kms-env/kms_log_dir', '/var/log/ranger/kms')}}/kms.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "ranger_kms"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{1} - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/metainfo.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/metainfo.xml
index b20201c..bc0aa74 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/metainfo.xml
@@ -45,6 +45,15 @@
               <primary>true</primary>
             </log>
           </logs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
         </component>
 
       </components>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
index 423cdec..5a25b92 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py
@@ -506,6 +506,26 @@
       mode = 0640
       )
 
+    # create ranger kms audit directory
+    if params.xa_audit_hdfs_is_enabled and params.has_namenode and params.has_hdfs_client_on_node:
+      params.HdfsResource("/ranger/audit",
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.hdfs_user,
+                        group=params.hdfs_user,
+                        mode=0755,
+                        recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/kms",
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.kms_user,
+                        group=params.kms_group,
+                        mode=0750,
+                        recursive_chmod=True
+      )
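+      # the two calls above only queue work; this flushes all queued
+      # create_on_execute operations in a single batch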
+      params.HdfsResource(None, action="execute")
+
     if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1:
       Logger.info('Audit to Hdfs enabled in NameNode HA environment, creating hdfs-site.xml')
       XmlConfig("hdfs-site.xml",
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
index 9fe0a61..2445f2e 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
@@ -29,6 +29,9 @@
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import get_kinit_path
 
 config  = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -296,4 +299,33 @@
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 
 # need this to capture cluster name from where ranger kms plugin is enabled
-cluster_name = config['clusterName']
\ No newline at end of file
+cluster_name = config['clusterName']
+
+has_namenode = len(namenode_host) > 0
+
+hdfs_user = default("/configurations/hadoop-env/hdfs_user", None)
+hdfs_user_keytab = default("/configurations/hadoop-env/hdfs_user_keytab", None)
+hdfs_principal_name = default("/configurations/hadoop-env/hdfs_principal_name", None)
+default_fs = default("/configurations/core-site/fs.defaultFS", None)
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+import functools
+# create partial functions with common arguments for every HdfsResource call
+# to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
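+# usage (see kms.py above): params.HdfsResource("<dir>", type="directory",
+#   action="create_on_execute", ...) queues work; params.HdfsResource(None,
+#   action="execute") then runs the queued operations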
+
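+# components installed on this host; kms.py creates the HDFS audit
+# directories only when an HDFS_CLIENT is present locally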
+local_component_list = default("/localComponents", [])
+has_hdfs_client_on_node = 'HDFS_CLIENT' in local_component_list
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/templates/input.config-ranger-kms.json.j2 b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/templates/input.config-ranger-kms.json.j2
new file mode 100644
index 0000000..306fade
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/templates/input.config-ranger-kms.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
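+{# Hypothetical example of a kms.log line matched by the grok patterns below:
+ #   2016-07-13 14:02:03,456 INFO  KMSWebApp - KMS Started
+ # parsed into the logtime, level, logger_name and log_message fields. #}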
+{
+  "input":[
+    {
+      "type":"ranger_kms",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kms-env/kms_log_dir', '/var/log/ranger/kms')}}/kms.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_kms"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{1} - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/role_command_order.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/role_command_order.json
index 006d177..7ddab41 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/role_command_order.json
@@ -1,7 +1,7 @@
 {
   "general_deps" : {
     "_comment" : "dependencies for RANGER-KMS",
-    "RANGER_KMS_SERVER-START" : ["RANGER_ADMIN-START"],
+    "RANGER_KMS_SERVER-START" : ["RANGER_ADMIN-START", "NAMENODE-START"],
     "RANGER_KMS_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_KMS_SERVER-START"]
   }
 }
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/alerts.json
new file mode 100644
index 0000000..05c3fe6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "RANGER_KMS": {
+    "service": [],
+    "RANGER_KMS_SERVER": [
+      {
+        "name": "ranger_kms_server_process",
+        "label": "Ranger KMS Server Process",
+        "description": "This host-level alert is triggered if the Ranger KMS Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{kms-env/kms_port}}",
+          "default_port": 9292,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/dbks-site.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/dbks-site.xml
new file mode 100644
index 0000000..4ac20b3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/dbks-site.xml
@@ -0,0 +1,206 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hadoop.kms.blacklist.DECRYPT_EEK</name>
+    <value>hdfs</value>
+    <description>Blacklist for decrypt EncryptedKey CryptoExtension operations</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.db.encrypt.key.password</name>
+    <value>_</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>Password used for encrypting Master Key</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.url</name>
+    <display-name>JDBC connect string</display-name>
+    <value>jdbc:mysql://localhost</value>
+    <description>URL for Database</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>kms-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+      <property>
+        <type>kms-properties</type>
+        <name>db_host</name>
+      </property>
+      <property>
+        <type>kms-properties</type>
+        <name>db_name</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
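+  <!-- hypothetical resolved value for DB_FLAVOR=MYSQL, built from db_host and db_name: jdbc:mysql://dbhost/rangerkms -->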
+  <property>
+    <name>ranger.ks.jpa.jdbc.user</name>
+    <value>{{db_user}}</value>
+    <description>Database username used for operation</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.password</name>
+    <value>_</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>Database user's password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.credential.provider.path</name>
+    <value>/etc/ranger/kms/rangerkms.jceks</value>
+    <description>Credential provider path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.credential.alias</name>
+    <value>ranger.ks.jdbc.password</value>
+    <description>Credential alias used for password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.masterkey.credential.alias</name>
+    <value>ranger.ks.masterkey.password</value>
+    <description>Credential alias used for masterkey</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.dialect</name>
+    <value>{{jdbc_dialect}}</value>
+    <description>Dialect used for database</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jpa.jdbc.driver</name>
+    <display-name>Driver class name for a JDBC Ranger KMS database</display-name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver used for database</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>kms-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.jdbc.sqlconnectorjar</name>
+    <value>{{ews_lib_jar_path}}</value>
+    <description>Driver used for database</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.hsm.type</name>
+    <display-name>HSM Type</display-name>
+    <value>LunaProvider</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>LunaProvider</value>
+          <label>Luna Provider</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <description>HSM type</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.hsm.enabled</name>
+    <display-name>HSM Enabled</display-name>
+    <value>false</value>
+    <description>Enable HSM?</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.hsm.partition.name</name>
+    <display-name>HSM partition name (in case of HSM HA, enter the group name)</display-name>
+    <value>par19</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.hsm.partition.password</name>
+    <value>_</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>HSM partition password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.hsm.partition.password.alias</name>
+    <display-name>HSM partition password alias</display-name>
+    <value>ranger.kms.hsm.partition.password</value>
+    <description>HSM partition password alias</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.kerberos.principal</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.ks.kerberos.keytab</name>
+    <value/>
+    <description/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-env.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-env.xml
new file mode 100644
index 0000000..e049840
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-env.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>kms_user</name>
+    <display-name>KMS User</display-name>
+    <value>kms</value>
+    <property-type>USER</property-type>
+    <description>KMS username</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kms_group</name>
+    <display-name>KMS group</display-name>
+    <value>kms</value>
+    <property-type>GROUP</property-type>
+    <description>KMS group</description>
+    <value-attributes>
+      <type>user</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kms_log_dir</name>
+    <value>/var/log/ranger/kms</value>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>kms_port</name>
+    <value>9292</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>create_db_user</name>
+    <display-name>Setup Database and Database User</display-name>
+    <value>true</value>
+    <description>If set to Yes, Ambari will create and set up the Ranger Database and Database User. This requires specifying the Database Admin user and password</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hsm_partition_password</name>
+    <display-name>HSM partition password</display-name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>HSM partition password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_kms_pid_dir</name>
+    <value>/var/run/ranger_kms</value>
+    <description/>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-log4j.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-log4j.xml
new file mode 100644
index 0000000..18dc46b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-log4j.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+   <property>
+    <name>ranger_kms_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Ranger-kms Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+   </property>
+   <property>
+    <name>ranger_kms_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Ranger-kms Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger_kms_audit_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Ranger-kms Audit Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+   </property>
+   <property>
+    <name>ranger_kms_audit_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Ranger-kms Audit Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>kms-log4j template</display-name>
+    <description>kms-log4j.properties</description>
+    <value>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB
+log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB
+log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}
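+
+# the placeholder tokens above are substituted by Ambari with the values of
+# the ranger_kms_*_maxfilesize / *_maxbackupindex properties defined in this file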
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.logger=INFO, kms
+log4j.additivity.kms=false
+log4j.rootLogger=INFO, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-properties.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-properties.xml
new file mode 100644
index 0000000..d2d4da5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-properties.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <display-name>Repository config username</display-name>
+    <value>keyadmin</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <display-name>Repository config password</display-name>
+    <value>keyadmin</value>
+    <property-type>PASSWORD</property-type>
+    <description/>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>DB_FLAVOR</name>
+    <display-name>DB FLAVOR</display-name>
+    <value>MYSQL</value>
+    <description>The database type to be used</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>MYSQL</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>ORACLE</value>
+          <label>ORACLE</label>
+        </entry>
+        <entry>
+          <value>POSTGRES</value>
+          <label>POSTGRES</label>
+        </entry>
+        <entry>
+          <value>MSSQL</value>
+          <label>MSSQL</label>
+        </entry>
+        <entry>
+          <value>SQLA</value>
+          <label>SQL Anywhere</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>SQL_CONNECTOR_JAR</name>
+    <display-name>SQL connector jar</display-name>
+    <value>{{driver_curl_target}}</value>
+    <description>Location of DB client library (please check the location of the jar file)</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>kms-properties</type>
+        <name>DB_FLAVOR</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false" update="false"/>
+  </property>
+  <property>
+    <name>db_root_user</name>
+    <display-name>Database Administrator (DBA) username</display-name>
+    <value>root</value>
+    <description>Database admin user. This user should have DBA permission to create the Ranger Database and Ranger Database User</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_root_password</name>
+    <display-name>Database Administrator (DBA) password</display-name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description>Database password for the database admin username</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_host</name>
+    <display-name>Ranger KMS DB host</display-name>
+    <value/>
+    <description>Database host</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_name</name>
+    <display-name>Ranger KMS DB name</display-name>
+    <value>rangerkms</value>
+    <description>Database name</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_user</name>
+    <display-name>Ranger KMS DB username</display-name>
+    <value>rangerkms</value>
+    <description>Database username used for the Ranger KMS schema</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>db_password</name>
+    <display-name>Ranger KMS DB password</display-name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description>Database password for the Ranger KMS schema</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>KMS_MASTER_KEY_PASSWD</name>
+    <display-name>KMS master key password</display-name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <description/>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-site.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-site.xml
new file mode 100644
index 0000000..1e6f7b5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/kms-site.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>dbks://http@localhost:9292/kms</value>
+    <description>URI of the backing KeyProvider for the KMS.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+    <value>none</value>
+    <description>If using the JavaKeyStoreProvider, the password for the keystore file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+    <description>Whether the KMS will act as a cache for the backing KeyProvider. When the cache is enabled, operations like getKeyVersion, getMetadata, and getCurrentKey will sometimes return cached data without consulting the backing KeyProvider. Cached values are flushed when keys are deleted or modified.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>600000</value>
+    <description>Expiry time for the KMS key version and key metadata cache, in milliseconds. This affects getKeyVersion and getMetadata.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
+    <description>Expiry time for the KMS current key cache, in milliseconds. This affects getCurrentKey operations.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.audit.aggregation.window.ms</name>
+    <value>10000</value>
+    <description>Duplicate audit log events within the aggregation window (specified in ms) are quashed to reduce log traffic. A single message for aggregated events is printed at the end of the window, along with a count of the number of aggregated events.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>Authentication type for the KMS. Can be either "simple" or "kerberos".
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>Path to the keytab with credentials for the configured Kerberos principal.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>The Kerberos principal to use for the HTTP endpoint. The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>Rules used to resolve Kerberos principal names.</description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider</name>
+    <value>random</value>
+    <description>Indicates how the secret to sign the authentication cookies will be stored. Options are 'random' (default), 'string' and 'zookeeper'. If using a setup with multiple KMS instances, 'zookeeper' should be used.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+    <description>The Zookeeper ZNode path where the KMS instances will store and retrieve the secret from.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+    <value>#HOSTNAME#:#PORT#,...</value>
+    <description>The Zookeeper connection string, a comma-separated list of hostname:port pairs.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+    <value>kerberos</value>
+    <description>The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+    <value>/etc/hadoop/conf/kms.keytab</value>
+    <description>The absolute path for the Kerberos keytab with the credentials to connect to Zookeeper.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+    <value>kms/#HOSTNAME#</value>
+    <description>The Kerberos service principal used to connect to Zookeeper.</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.kms.security.authorization.manager</name>
+    <value>org.apache.ranger.authorization.kms.authorizer.RangerKmsAuthorizer</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-audit.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-audit.xml
new file mode 100644
index 0000000..526794e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-audit.xml
@@ -0,0 +1,124 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>core-site</type>
+        <name>fs.defaultFS</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to, make sure the service user has required permissions</description>
+    <depends-on>
+      <property>
+        <type>core-site</type>
+        <name>fs.defaultFS</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/ranger/kms/audit/hdfs/spool</value>
+    <description>/var/log/ranger/kms/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>true</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/ranger/kms/audit/solr/spool</value>
+    <description>/var/log/ranger/kms/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value>{{ranger_audit_solr_urls}}</value>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>none</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster where the Ranger KMS plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-policymgr-ssl.xml
new file mode 100644
index 0000000..9eedc73
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-policymgr-ssl.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>password for keystore</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <description>java truststore password</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/ranger-kms/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/ranger-kms/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-security.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-security.xml
new file mode 100644
index 0000000..13adcb4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-security.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.kms.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this kms instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.policy.rest.ssl.config.file</name>
+    <value>/etc/ranger/kms/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for changes in policies?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.kms.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-site.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-site.xml
new file mode 100644
index 0000000..1d32f72
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/configuration/ranger-kms-site.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.service.host</name>
+    <value>{{kms_host}}</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.http.port</name>
+    <value>{{kms_port}}</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.port</name>
+    <value>9393</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.shutdown.port</name>
+    <value>7085</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.contextName</name>
+    <value>/kms</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xa.webapp.dir</name>
+    <value>./webapp</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.ssl.enabled</name>
+    <value>false</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.file</name>
+    <value>/etc/security/serverKeys/ranger-kms-keystore.jks</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.client.auth</name>
+    <value>want</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.keyalias</name>
+    <value>rangerkms</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.pass</name>
+    <value>rangerkms</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ranger.credential.provider.path</name>
+    <value>/etc/ranger/kms/rangerkms.jceks</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ranger.service.https.attrib.keystore.credential.alias</name>
+    <value>keyStoreCredentialAlias</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+  <property>
+    <name>ajp.enabled</name>
+    <value>false</value>
+    <on-ambari-upgrade add="false"/>
+    <description/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/kerberos.json
new file mode 100644
index 0000000..a54783e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/kerberos.json
@@ -0,0 +1,84 @@
+{
+  "services": [
+    {
+      "name": "RANGER_KMS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "keytab": {
+            "configuration": "kms-site/hadoop.kms.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "kms-site/hadoop.kms.authentication.kerberos.name.rules"
+      ],
+      "configurations": [
+        {
+          "kms-site": {
+            "hadoop.kms.authentication.type": "kerberos",
+            "hadoop.kms.authentication.kerberos.principal": "*"
+          }
+        },
+        {
+          "ranger-kms-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "RANGER_KMS_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/smokeuser"
+            },
+            {
+              "name": "rangerkms",
+              "principal": {
+                "value": "rangerkms/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "dbks-site/ranger.ks.kerberos.principal",
+                "local_username" : "keyadmin"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rangerkms.service.keytab",
+                "owner": {
+                  "name": "${kms-env/kms_user}",
+                  "access": "r"
+                },
+                "configuration": "dbks-site/ranger.ks.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/RANGER_KMS/RANGER_KMS_SERVER/rangerkms",
+              "principal": {
+                "configuration": "ranger-kms-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-kms-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/metainfo.xml
new file mode 100644
index 0000000..24ac51f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/metainfo.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER_KMS</name>
+      <displayName>Ranger KMS</displayName>
+      <comment>Key Management Server</comment>
+      <version>0.5.0.3.0</version>
+      <components>
+          
+        <component>
+          <name>RANGER_KMS_SERVER</name>
+          <displayName>Ranger KMS Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/kms_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>ranger_kms</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>ranger_${stack_version}-kms</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>ranger-${stack_version}-kms</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <configuration-dependencies>
+        <config-type>kms-properties</config-type>
+        <config-type>kms-site</config-type>
+        <config-type>kms-log4j</config-type>
+        <config-type>dbks-site</config-type>
+        <config-type>ranger-kms-site</config-type>
+        <config-type>ranger-kms-audit</config-type>
+        <config-type>ranger-kms-policymgr-ssl</config-type>
+        <config-type>ranger-kms-security</config-type>
+      </configuration-dependencies>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>RANGER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <themes>
+        <theme>
+          <fileName>theme_version_1.json</fileName>
+          <default>true</default>
+        </theme>
+        <theme>
+          <fileName>theme_version_2.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms.py
new file mode 100755
index 0000000..5a25b92
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms.py
@@ -0,0 +1,677 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+import os
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and offers the same function set.
+import urllib2, base64, httplib
+from StringIO import StringIO as BytesIO
+from datetime import datetime
+from resource_management.core.resources.system import File, Directory, Execute
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
+from resource_management.core.source import DownloadSource, InlineTemplate
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.functions.decorator import safe_retry
+from resource_management.core.utils import PasswordString
+from resource_management.core.shell import as_sudo
+import re
+import time
+import socket
+
+def password_validation(password, key):
+  import params
+  if password.strip() == "":
+    raise Fail("Blank password is not allowed for {0} property. Please enter valid password.".format(key))
+  if re.search("[\\\`'\"]",password):
+    raise Fail("{0} password contains one of the unsupported special characters like \" ' \ `".format(key))
+  else:
+    Logger.info("Password validated")
+
+def setup_kms_db(stack_version=None):
+  import params
+
+  if params.has_ranger_admin:
+
+    kms_home = params.kms_home
+    version = params.version
+    if stack_version is not None:
+      kms_home = format("{stack_root}/{stack_version}/ranger-kms")
+      version = stack_version
+
+    password_validation(params.kms_master_key_password, 'KMS master key')
+
+    copy_jdbc_connector(stack_version=version)
+
+    env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home}
+    if params.db_flavor.lower() == 'sqla':
+      env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
+
+    dba_setup = format('ambari-python-wrap {kms_home}/dba_script.py -q')
+    db_setup = format('ambari-python-wrap {kms_home}/db_setup.py')
+
+    if params.create_db_user:
+      Logger.info('Setting up Ranger KMS DB and DB User')
+      Execute(dba_setup, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
+    else:
+      Logger.info('Separate DBA property not set. Assuming Ranger KMS DB and DB User already exist!')
+    Execute(db_setup, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
+
+def setup_java_patch():
+  import params
+
+  if params.has_ranger_admin:
+
+    kms_home = params.kms_home
+    setup_java_patch = format('ambari-python-wrap {kms_home}/db_setup.py -javapatch')
+
+    env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home}
+    if params.db_flavor.lower() == 'sqla':
+      env_dict = {'RANGER_KMS_HOME':kms_home, 'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH':params.ld_library_path}
+
+    Execute(setup_java_patch, environment=env_dict, logoutput=True, user=params.kms_user, tries=5, try_sleep=10)
+
+    kms_lib_path = format('{kms_home}/ews/webapp/lib/')
+    files = os.listdir(kms_lib_path)
+    hadoop_jar_files = []
+
+    for x in files:
+      if x.startswith('hadoop-common') and x.endswith('.jar'):
+        hadoop_jar_files.append(x)
+
+    if len(hadoop_jar_files) != 0:
+      for f in hadoop_jar_files:
+        Execute((format('{java_home}/bin/jar'),'-uf', format('{kms_home}/ews/webapp/lib/{f}'), format('{kms_home}/ews/webapp/META-INF/services/org.apache.hadoop.crypto.key.KeyProviderFactory')),
+          user=params.kms_user)
+
+        File(format('{kms_home}/ews/webapp/lib/{f}'), owner=params.kms_user, group=params.kms_group)
+
+
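+# store a credential in a JCEKS keystore via the Ranger credentialapi buildks
+# tool, so plain-text passwords stay out of the on-disk XML configs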
+def do_keystore_setup(cred_provider_path, credential_alias, credential_password): 
+  import params
+
+  if cred_provider_path is not None:
+    java_bin = format('{java_home}/bin/java')
+    file_path = format('jceks://file{cred_provider_path}')
+    cmd = (java_bin, '-cp', params.cred_lib_path, 'org.apache.ranger.credentialapi.buildks', 'create', credential_alias, '-value', PasswordString(credential_password), '-provider', file_path)
+    Execute(cmd,
+            environment={'JAVA_HOME': params.java_home}, 
+            logoutput=True, 
+            sudo=True,
+    )
+
+    File(cred_provider_path,
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode = 0640
+    )
+
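+# main configure step: lays down directories, the JDBC driver, init-script and
+# symlink wiring, and the KMS XML configs, masking password properties on disk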
+def kms(upgrade_type=None):
+  import params
+
+  if params.has_ranger_admin:
+
+    Directory(params.kms_conf_dir,
+      owner = params.kms_user,
+      group = params.kms_group,
+      create_parents = True
+    )
+
+    Directory("/etc/security/serverKeys",
+      create_parents = True,
+      cd_access = "a"
+    )
+
+    Directory("/etc/ranger/kms",
+      create_parents = True,
+      cd_access = "a"
+    )
+
+    copy_jdbc_connector()
+
+    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+      mode = 0644,
+    )
+
+    cp = format("{check_db_connection_jar}")
+    if params.db_flavor.lower() == 'sqla':
+      cp = cp + os.pathsep + format("{kms_home}/ews/webapp/lib/sajdbc4.jar")
+    else:
+      path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
+      if not os.path.isfile(path_to_jdbc):
+        # fall back to the stack's default driver jar name for this DB flavor;
+        # guard against None so os.path.isfile does not raise a TypeError
+        default_jar = params.default_connectors_map.get(params.db_flavor.lower())
+        path_to_jdbc = (format("{kms_home}/ews/webapp/lib/") + default_jar) if default_jar else None
+        if path_to_jdbc is None or not os.path.isfile(path_to_jdbc):
+          path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
+          error_message = "Error! Sorry, but we can't find jdbc driver with default name " + str(default_jar) + \
+                " in ranger kms lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on server host."
+          Logger.error(error_message)
+
+      cp = cp + os.pathsep + path_to_jdbc
+
+    db_connection_check_command = format(
+      "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}")
+    
+    env_dict = {}
+    if params.db_flavor.lower() == 'sqla':
+      env_dict = {'LD_LIBRARY_PATH':params.ld_library_path}
+
+    Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)
+
+    if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith("/None"):
+      if params.xa_previous_jdbc_jar and os.path.isfile(params.xa_previous_jdbc_jar):
+        File(params.xa_previous_jdbc_jar, action='delete')
+
+      File(params.downloaded_connector_path,
+        content = DownloadSource(params.driver_source),
+        mode = 0644
+      )
+
+      Execute(('cp', '--remove-destination', params.downloaded_connector_path, params.driver_target),
+          path=["/bin", "/usr/bin/"],
+          sudo=True)
+
+      File(params.driver_target, mode=0644)
+
+    Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF', 'classes', 'lib'),
+        mode=0755,
+        owner=params.kms_user,
+        group=params.kms_group        
+      )
+
+    Execute(('cp',format('{kms_home}/ranger-kms-initd'),'/etc/init.d/ranger-kms'),
+    not_if=format('ls /etc/init.d/ranger-kms'),
+    only_if=format('ls {kms_home}/ranger-kms-initd'),
+    sudo=True)
+
+    File('/etc/init.d/ranger-kms',
+      mode = 0755
+    )
+
+    Directory(format('{kms_home}/'),
+              owner = params.kms_user,
+              group = params.kms_group,
+              recursive_ownership = True,
+    )
+
+    Directory(params.ranger_kms_pid_dir,
+      mode=0755,
+      owner = params.kms_user,
+      group = params.user_group,
+      cd_access = "a",
+      create_parents=True
+    )
+
+    if params.stack_supports_pid:
+      File(format('{kms_conf_dir}/ranger-kms-env-piddir.sh'),
+        content = format("export RANGER_KMS_PID_DIR_PATH={ranger_kms_pid_dir}\nexport KMS_USER={kms_user}"),
+        owner = params.kms_user,
+        group = params.kms_group,
+        mode=0755
+      )
+
+    Directory(params.kms_log_dir,
+      owner = params.kms_user,
+      group = params.kms_group,
+      cd_access = 'a',
+      create_parents=True,
+      mode=0755
+    )
+
+    File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
+      content = format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode=0755
+    )
+
+    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms'),
+      not_if=format('ls /usr/bin/ranger-kms'),
+      only_if=format('ls {kms_home}/ranger-kms'),
+      sudo=True)
+
+    File('/usr/bin/ranger-kms', mode = 0755)
+
+    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms-services.sh'),
+      not_if=format('ls /usr/bin/ranger-kms-services.sh'),
+      only_if=format('ls {kms_home}/ranger-kms'),
+      sudo=True)
+
+    File('/usr/bin/ranger-kms-services.sh', mode = 0755)
+
+    Execute(('ln','-sf', format('{kms_home}/ranger-kms-initd'),format('{kms_home}/ranger-kms-services.sh')),
+      not_if=format('ls {kms_home}/ranger-kms-services.sh'),
+      only_if=format('ls {kms_home}/ranger-kms-initd'),
+      sudo=True)
+
+    File(format('{kms_home}/ranger-kms-services.sh'), mode = 0755)
+
+    Directory(params.kms_log_dir,
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode = 0775
+    )
+
+    do_keystore_setup(params.credential_provider_path, params.jdbc_alias, params.db_password)
+    do_keystore_setup(params.credential_provider_path, params.masterkey_alias, params.kms_master_key_password)
+    if params.stack_support_kms_hsm and params.enable_kms_hsm:
+      do_keystore_setup(params.credential_provider_path, params.hms_partition_alias, unicode(params.hms_partition_passwd))
+    if params.stack_supports_ranger_kms_ssl and params.ranger_kms_ssl_enabled:
+      do_keystore_setup(params.ranger_kms_cred_ssl_path, params.ranger_kms_ssl_keystore_alias, params.ranger_kms_ssl_passwd)
+
+    # remove plain-text password from xml configs
+    dbks_site_copy = {}
+    dbks_site_copy.update(params.config['configurations']['dbks-site'])
+
+    for prop in params.dbks_site_password_properties:
+      if prop in dbks_site_copy:
+        dbks_site_copy[prop] = "_"
+
+    XmlConfig("dbks-site.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=dbks_site_copy,
+      configuration_attributes=params.config['configuration_attributes']['dbks-site'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0644
+    )
+
+    ranger_kms_site_copy = {}
+    ranger_kms_site_copy.update(params.config['configurations']['ranger-kms-site'])
+    if params.stack_supports_ranger_kms_ssl:
+      # remove plain-text password from xml configs
+      for prop in params.ranger_kms_site_password_properties:
+        if prop in ranger_kms_site_copy:
+          ranger_kms_site_copy[prop] = "_"
+
+    XmlConfig("ranger-kms-site.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=ranger_kms_site_copy,
+      configuration_attributes=params.config['configuration_attributes']['ranger-kms-site'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0644
+    )
+
+    XmlConfig("kms-site.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=params.config['configurations']['kms-site'],
+      configuration_attributes=params.config['configuration_attributes']['kms-site'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0644
+    )
+
+    File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
+      owner=params.kms_user,
+      group=params.kms_group,
+      content=InlineTemplate(params.kms_log4j),
+      mode=0644
+    )
+    if params.security_enabled:
+      # core-site.xml linking required by setup for HDFS encryption
+      XmlConfig("core-site.xml",
+        conf_dir=params.kms_conf_dir,
+        configurations=params.config['configurations']['core-site'],
+        configuration_attributes=params.config['configuration_attributes']['core-site'],
+        owner=params.kms_user,
+        group=params.kms_group,
+        mode=0644
+      )
+    else:
+      File(format('{kms_conf_dir}/core-site.xml'), action="delete")
+
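+# download the JDBC driver advertised by the server and register it in
+# install.properties; SQLA ships as a tarball with native libraries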
+def copy_jdbc_connector(stack_version=None):
+  import params
+
+  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
+    # double braces keep {db_name}/{path_to_jdbc} as literal placeholders; single braces would raise KeyError in format()
+    error_message = "Error! Sorry, but we can't find jdbc driver related to {0} database to download from {1}. \
+    Please run 'ambari-server setup --jdbc-db={{db_name}} --jdbc-driver={{path_to_jdbc}}' on server host.".format(params.db_flavor, params.jdk_location)
+    Logger.error(error_message)
+
+  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
+    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
+      File(params.previous_jdbc_jar, action='delete')
+
+  kms_home = params.kms_home
+  if stack_version is not None:
+    kms_home = format("{stack_root}/{stack_version}/ranger-kms")
+
+  driver_curl_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
+
+  File(params.downloaded_custom_connector,
+    content = DownloadSource(params.driver_curl_source),
+    mode = 0644
+  )
+
+  Directory(os.path.join(kms_home, 'ews', 'lib'),
+    mode=0755
+  )
+
+  if params.db_flavor.lower() == 'sqla':
+    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)
+
+    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
+      path=["/bin", "/usr/bin/"],
+      sudo=True)
+
+    Directory(params.jdbc_libs_dir,
+      cd_access="a",
+      create_parents=True)
+
+    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
+      path=["/bin", "/usr/bin/"])
+
+    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', 'sajdbc4.jar'), mode=0644)
+  else:
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
+      path=["/bin", "/usr/bin/"],
+      sudo=True)
+
+    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', params.jdbc_jar_name), mode=0644)
+
+  ModifyPropertiesFile(format("{kms_home}/install.properties"),
+    properties = params.config['configurations']['kms-properties'],
+    owner = params.kms_user
+  )
+
+  if params.db_flavor.lower() == 'sqla':
+    ModifyPropertiesFile(format("{kms_home}/install.properties"),
+      properties = {'SQL_CONNECTOR_JAR': format('{kms_home}/ews/webapp/lib/sajdbc4.jar')},
+      owner = params.kms_user,
+    )
+  else:
+    ModifyPropertiesFile(format("{kms_home}/install.properties"),
+      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
+      owner = params.kms_user,
+    )
+
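+# register the KMS repository in Ranger Admin, write the ranger-kms plugin
+# configs (with masked passwords) and provision the HDFS audit directories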
+def enable_kms_plugin():
+
+  import params
+
+  if params.has_ranger_admin:
+
+    ranger_flag = False
+
+    if params.stack_supports_ranger_kerberos and params.security_enabled:
+      if not is_empty(params.rangerkms_principal) and params.rangerkms_principal != '':
+        ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.rangerkms_keytab, params.rangerkms_principal)
+      else:
+        ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.spnego_keytab, params.spnego_principal)
+    else:
+      ranger_flag = check_ranger_service()
+
+    if not ranger_flag:
+      Logger.error('Error in get/create service for Ranger KMS.')
+
+    current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    File(format('{kms_conf_dir}/ranger-security.xml'),
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode = 0644,
+      content = format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>')
+    )
+
+    Directory([os.path.join('/etc', 'ranger', params.repo_name), os.path.join('/etc', 'ranger', params.repo_name, 'policycache')],
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode=0775,
+      create_parents = True
+    )
+    
+    File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',format('kms_{repo_name}.json')),
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode = 0644        
+    )
+
+    # remove plain-text password from xml configs
+    plugin_audit_properties_copy = {}
+    plugin_audit_properties_copy.update(params.config['configurations']['ranger-kms-audit'])
+
+    if params.plugin_audit_password_property in plugin_audit_properties_copy:
+      plugin_audit_properties_copy[params.plugin_audit_password_property] = "crypted"
+
+    XmlConfig("ranger-kms-audit.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=plugin_audit_properties_copy,
+      configuration_attributes=params.config['configuration_attributes']['ranger-kms-audit'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0744)
+
+    XmlConfig("ranger-kms-security.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=params.config['configurations']['ranger-kms-security'],
+      configuration_attributes=params.config['configuration_attributes']['ranger-kms-security'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0744)
+
+    # remove plain-text password from xml configs
+    ranger_kms_policymgr_ssl_copy = {}
+    ranger_kms_policymgr_ssl_copy.update(params.config['configurations']['ranger-kms-policymgr-ssl'])
+
+    for prop in params.kms_plugin_password_properties:
+      if prop in ranger_kms_policymgr_ssl_copy:
+        ranger_kms_policymgr_ssl_copy[prop] = "crypted"
+
+    XmlConfig("ranger-policymgr-ssl.xml",
+      conf_dir=params.kms_conf_dir,
+      configurations=ranger_kms_policymgr_ssl_copy,
+      configuration_attributes=params.config['configuration_attributes']['ranger-kms-policymgr-ssl'],
+      owner=params.kms_user,
+      group=params.kms_group,
+      mode=0744)
+
+    if params.xa_audit_db_is_enabled:
+      cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'auditDBCred', '-v', PasswordString(params.xa_audit_db_password), '-c', '1')
+      Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
+
+    cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslKeyStore', '-v', PasswordString(params.ssl_keystore_password), '-c', '1')
+    Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
+
+    cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslTrustStore', '-v', PasswordString(params.ssl_truststore_password), '-c', '1')
+    Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
+
+    File(params.credential_file,
+      owner = params.kms_user,
+      group = params.kms_group,
+      mode = 0640
+      )
+
+    # create ranger kms audit directory
+    if params.xa_audit_hdfs_is_enabled and params.has_namenode and params.has_hdfs_client_on_node:
+      params.HdfsResource("/ranger/audit",
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.hdfs_user,
+                        group=params.hdfs_user,
+                        mode=0755,
+                        recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/kms",
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.kms_user,
+                        group=params.kms_group,
+                        mode=0750,
+                        recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1:
+      Logger.info('Audit to HDFS is enabled in a NameNode HA environment, creating hdfs-site.xml')
+      XmlConfig("hdfs-site.xml",
+        conf_dir=params.kms_conf_dir,
+        configurations=params.config['configurations']['hdfs-site'],
+        configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+        owner=params.kms_user,
+        group=params.kms_group,
+        mode=0644
+      )
+    else:
+      File(format('{kms_conf_dir}/hdfs-site.xml'), action="delete")
+
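+# install the unlimited-strength JCE policy jars required for strong key
+# lengths, replacing the default policy jars under {java_home}/jre/lib/security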
+def setup_kms_jce():
+  import params
+
+  if params.jce_name is not None:
+    Directory(params.jce_source_dir,
+      create_parents = True
+    )
+
+    jce_target = format('{jce_source_dir}/{jce_name}')
+
+    File(jce_target,
+      content = DownloadSource(format('{jdk_location}/{jce_name}')),
+      mode = 0644,
+    )
+
+    File([format("{java_home}/jre/lib/security/local_policy.jar"), format("{java_home}/jre/lib/security/US_export_policy.jar")],
+      action = "delete",
+    )
+
+    unzip_cmd = ("unzip", "-o", "-j", "-q", jce_target, "-d", format("{java_home}/jre/lib/security"))
+
+    Execute(unzip_cmd,
+      only_if = format("test -e {java_home}/jre/lib/security && test -f {jce_target}"),
+      path = ['/bin/','/usr/bin'],
+      sudo = True
+    )
+  else:
+    Logger.warning("Required jce policy zip is not available, need to setup manually")
+
+  
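+# non-kerberized path: verify Ranger Admin is reachable, ensure the ambari
+# admin user exists, then get or create the KMS repository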
+def check_ranger_service():
+  import params
+
+  policymgr_mgr_url = params.policymgr_mgr_url
+  if policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+  ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)
+  ambari_username_password_for_ranger = format("{ambari_ranger_admin}:{ambari_ranger_password}")
+  response_code = ranger_adm_obj.check_ranger_login_urllib2(policymgr_mgr_url)
+
+  if response_code is not None and response_code == 200:
+    user_resp_code = ranger_adm_obj.create_ambari_admin_user(params.ambari_ranger_admin, params.ambari_ranger_password, params.admin_uname_password)
+    if user_resp_code is not None and user_resp_code == 200:
+      get_repo_flag = get_repo(policymgr_mgr_url, params.repo_name, ambari_username_password_for_ranger)
+      if not get_repo_flag:
+        return create_repo(policymgr_mgr_url, json.dumps(params.kms_ranger_plugin_repo), ambari_username_password_for_ranger)
+      else:
+        return True
+    else:
+      return False
+  else:
+    Logger.error('Ranger service is not reachable')
+    return False
+
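+# create the Ranger 'kms' service (repository) via the public REST API, i.e.
+#   POST {ranger_admin}/service/public/v2/api/service  (basic auth, JSON body)
+# retried by @safe_retry since Ranger Admin may still be starting up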
+@safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False)
+def create_repo(url, data, usernamepassword):
+  try:
+    base_url = url + '/service/public/v2/api/service'
+    base64string = base64.encodestring('{0}'.format(usernamepassword)).replace('\n', '')
+    headers = {
+      'Accept': 'application/json',
+      "Content-Type": "application/json"
+    }
+    request = urllib2.Request(base_url, data, headers)
+    request.add_header("Authorization", "Basic {0}".format(base64string))
+    result = urllib2.urlopen(request, timeout=20)
+    response_code = result.getcode()
+    response = json.loads(json.JSONEncoder().encode(result.read()))
+    if response_code == 200:
+      Logger.info('Repository created successfully')
+      return True
+    else:
+      Logger.info('Repository not created')
+      return False
+  except urllib2.URLError, e:
+    if isinstance(e, urllib2.HTTPError):
+      raise Fail("Error creating service. Http status code - {0}. \n {1}".format(e.code, e.read()))
+    else:
+      raise Fail("Error creating service. Reason - {0}.".format(e.reason))
+  except socket.timeout as e:
+    raise Fail("Error creating service. Reason - {0}".format(e))
+
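+# look up the repository by name via the public REST API, i.e.
+#   GET {ranger_admin}/service/public/v2/api/service?serviceName=<name>&serviceType=kms&isEnabled=true
+# returns True only when an enabled repository with that exact name exists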
+@safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False)
+def get_repo(url, name, usernamepassword):
+  try:
+    base_url = url + '/service/public/v2/api/service?serviceName=' + name + '&serviceType=kms&isEnabled=true'
+    request = urllib2.Request(base_url)
+    base64string = base64.encodestring(usernamepassword).replace('\n', '')
+    request.add_header("Content-Type", "application/json")
+    request.add_header("Accept", "application/json")
+    request.add_header("Authorization", "Basic {0}".format(base64string))
+    result = urllib2.urlopen(request, timeout=20)
+    response_code = result.getcode()
+    response = json.loads(result.read())
+    if response_code == 200 and len(response) > 0:
+      for repo in response:
+        # check key presence before dereferencing, and keep scanning the list
+        # instead of returning False on the first non-matching entry
+        if repo.has_key('name') and repo.get('name').lower() == name.lower():
+          Logger.info('KMS repository exists')
+          return True
+      Logger.info('KMS repository does not exist')
+      return False
+    else:
+      Logger.info('KMS repository does not exist')
+      return False
+  except urllib2.URLError, e:
+    if isinstance(e, urllib2.HTTPError):
+      raise Fail("Error getting {0} service. Http status code - {1}. \n {2}".format(name, e.code, e.read()))
+    else:
+      raise Fail("Error getting {0} service. Reason - {1}.".format(name, e.reason))
+  except socket.timeout as e:
+    raise Fail("Error creating service. Reason - {0}".format(e))
+
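+# kerberized variant: authenticates with the given keytab/principal via curl
+# and creates the KMS repository as the keyadmin user if it is missing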
+def check_ranger_service_support_kerberos(user, keytab, principal):
+  import params
+
+  policymgr_mgr_url = params.policymgr_mgr_url
+  if policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+  ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
+  response_code = ranger_adm_obj.check_ranger_login_curl(user, keytab, principal, policymgr_mgr_url, True)
+
+  if response_code is not None and response_code[0] == 200:
+    get_repo_name_response = ranger_adm_obj.get_repository_by_name_curl(user, keytab, principal, params.repo_name, 'kms', 'true', is_keyadmin = True)
+    if get_repo_name_response is not None:
+      Logger.info('KMS repository {0} exists'.format(get_repo_name_response['name']))
+      return True
+    else:
+      create_repo_response = ranger_adm_obj.create_repository_curl(user, keytab, principal, params.repo_name, json.dumps(params.kms_ranger_plugin_repo), None, is_keyadmin = True)
+      if create_repo_response is not None and len(create_repo_response) > 0:
+        return True
+      else:
+        return False
+  else:
+    Logger.error('Ranger service is not reachable')
+    return False
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_server.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_server.py
new file mode 100755
index 0000000..44d61da
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_server.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions.default import default
+from kms import kms, setup_kms_db, setup_java_patch, enable_kms_plugin, setup_kms_jce
+from kms_service import kms_service
+import upgrade
+
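+# Script entry point for the RANGER_KMS_SERVER component; Ambari drives the
+# install/configure/start/stop/status lifecycle methods defined below.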
+class KmsServer(Script):
+
+  def get_component_name(self):
+    return "ranger-kms"
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+
+    setup_kms_db()
+    self.configure(env)
+    setup_java_patch()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    kms_service(action = 'stop', upgrade_type=upgrade_type)
+    if params.stack_supports_pid:
+      File(params.ranger_kms_pid_file,
+        action = "delete"
+      )
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    enable_kms_plugin()
+    setup_kms_jce()
+    kms_service(action = 'start', upgrade_type=upgrade_type)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.stack_supports_pid:
+      check_process_status(status_params.ranger_kms_pid_file)
+      return
+
+    cmd = 'ps -ef | grep proc_rangerkms | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code != 0:
+      Logger.debug('KMS process not running')
+      raise ComponentIsNotRunning()
+    pass
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    kms()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    upgrade.prestart(env, "ranger-kms")
+    kms(upgrade_type=upgrade_type)
+    setup_java_patch()
+
+  def setup_ranger_kms_database(self, env):
+    import params
+    env.set_params(params)
+
+    upgrade_stack = stack_select._get_upgrade_stack()
+    if upgrade_stack is None:
+      raise Fail('Unable to determine the stack and stack version')
+
+    stack_version = upgrade_stack[1]
+    Logger.info(format('Setting Ranger KMS database schema, using version {stack_version}'))
+    setup_kms_db(stack_version=stack_version)
+    
+  def get_log_folder(self):
+    import params
+    return params.kms_log_dir
+  
+  def get_user(self):
+    import params
+    return params.kms_user
+
+if __name__ == "__main__":
+  KmsServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_service.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_service.py
new file mode 100644
index 0000000..2ff48c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/kms_service.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.resources.system import Execute, File
+from resource_management.core import shell
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.show_logs import show_logs
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.libraries.functions.constants import Direction
+import os
+
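+# start/stop wrapper around the ranger-kms init script; on failure the tail of
+# the KMS log is surfaced via show_logs before the exception is re-raised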
+def kms_service(action='start', upgrade_type=None):
+  import params
+
+  env_dict = {'JAVA_HOME': params.java_home}
+  if params.db_flavor.lower() == 'sqla':
+    env_dict = {'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH': params.ld_library_path}
+
+  if action == 'start':
+    no_op_test = format('ps -ef | grep proc_rangerkms | grep -v grep')
+    cmd = format('{kms_home}/ranger-kms start')
+    try:
+      Execute(cmd, not_if=no_op_test, environment=env_dict, user=format('{kms_user}'))
+    except:
+      show_logs(params.kms_log_dir, params.kms_user)
+      raise
+  elif action == 'stop':
+    if upgrade_type == UPGRADE_TYPE_NON_ROLLING and params.upgrade_direction == Direction.UPGRADE:
+      if os.path.isfile(format('{kms_home}/ranger-kms')):
+        File(format('{kms_home}/ranger-kms'),
+          owner=params.kms_user,
+          group = params.kms_group
+        )
+    cmd = format('{kms_home}/ranger-kms stop')
+    try:
+      Execute(cmd, environment=env_dict, user=format('{kms_user}'))
+    except:
+      show_logs(params.kms_log_dir, params.kms_user)
+      raise
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/params.py
new file mode 100755
index 0000000..2445f2e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/params.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import get_kinit_path
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = Script.get_stack_root()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+version = default("/commandParams/version", None)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
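+# feature toggles resolved against the stack version, so the same scripts can
+# serve multiple stack releases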
+stack_supports_config_versioning = check_stack_feature(StackFeature.CONFIG_VERSIONING, version_for_stack_feature_checks)
+stack_support_kms_hsm = check_stack_feature(StackFeature.RANGER_KMS_HSM_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_pid = check_stack_feature(StackFeature.RANGER_KMS_PID_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kms_ssl = check_stack_feature(StackFeature.RANGER_KMS_SSL, version_for_stack_feature_checks)
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+if stack_supports_config_versioning:
+  kms_home = format('{stack_root}/current/ranger-kms')
+  kms_conf_dir = format('{stack_root}/current/ranger-kms/conf')
+
+kms_log_dir = default("/configurations/kms-env/kms_log_dir", "/var/log/ranger/kms")
+java_home = config['hostLevelParams']['java_home']
+kms_user  = default("/configurations/kms-env/kms_user", "kms")
+kms_group = default("/configurations/kms-env/kms_group", "kms")
+
+ranger_kms_audit_log_maxfilesize = default('/configurations/kms-log4j/ranger_kms_audit_log_maxfilesize',256)
+ranger_kms_audit_log_maxbackupindex = default('/configurations/kms-log4j/ranger_kms_audit_log_maxbackupindex',20)
+ranger_kms_log_maxfilesize = default('/configurations/kms-log4j/ranger_kms_log_maxfilesize',256)
+ranger_kms_log_maxbackupindex = default('/configurations/kms-log4j/ranger_kms_log_maxbackupindex',20)
+
+jdk_location = config['hostLevelParams']['jdk_location']
+kms_log4j = config['configurations']['kms-log4j']['content']
+
+# ranger host
+# use default() so an absent or empty host list does not raise an IndexError
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = len(ranger_admin_hosts) > 0
+kms_host = config['clusterHostInfo']['ranger_kms_server_hosts'][0]
+kms_port = config['configurations']['kms-env']['kms_port']
+
+create_db_user = config['configurations']['kms-env']['create_db_user']
+
+#kms properties
+db_flavor = (config['configurations']['kms-properties']['DB_FLAVOR']).lower()
+db_host = config['configurations']['kms-properties']['db_host']
+db_name = config['configurations']['kms-properties']['db_name']
+db_user = config['configurations']['kms-properties']['db_user']
+db_password = unicode(config['configurations']['kms-properties']['db_password'])
+kms_master_key_password = unicode(config['configurations']['kms-properties']['KMS_MASTER_KEY_PASSWD'])
+credential_provider_path = config['configurations']['dbks-site']['ranger.ks.jpa.jdbc.credential.provider.path']
+jdbc_alias = config['configurations']['dbks-site']['ranger.ks.jpa.jdbc.credential.alias']
+masterkey_alias = config['configurations']['dbks-site']['ranger.ks.masterkey.credential.alias']
+repo_name = str(config['clusterName']) + '_kms'
+repo_name_value = config['configurations']['ranger-kms-security']['ranger.plugin.kms.service.name']
+if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+  repo_name = repo_name_value
+cred_lib_path = os.path.join(kms_home,"cred","lib","*")
+cred_setup_prefix = (format('{kms_home}/ranger_credential_helper.py'), '-l', cred_lib_path)
+credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+if has_ranger_admin:
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+  xa_db_host = config['configurations']['admin-properties']['db_host']
+
+  admin_uname = config['configurations']['ranger-env']['admin_username']
+  admin_password = config['configurations']['ranger-env']['admin_password']
+  ambari_ranger_admin = config['configurations']['ranger-env']['ranger_admin_username']
+  ambari_ranger_password = config['configurations']['ranger-env']['ranger_admin_password']
+  admin_uname_password = format("{admin_uname}:{admin_password}")
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+
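+# default driver jar names per DB flavor, used as a fallback when no custom
+# jdbc jar was registered via 'ambari-server setup --jdbc-db= --jdbc-driver='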
+default_connectors_map = { "mssql":"sqljdbc4.jar",
+                           "mysql":"mysql-connector-java.jar",
+                           "postgres":"postgresql-jdbc.jar",
+                           "oracle":"ojdbc.jar",
+                           "sqla":"sajdbc4.jar"}
+
+java_share_dir = '/usr/share/java'
+jdbc_jar_name = None
+previous_jdbc_jar_name = None
+if db_flavor == 'mysql':
+  jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+  db_jdbc_url = format('jdbc:log4jdbc:mysql://{db_host}/{db_name}')
+  db_jdbc_driver = "com.mysql.jdbc.Driver"
+  jdbc_dialect = "org.eclipse.persistence.platform.database.MySQLPlatform"
+elif db_flavor == 'oracle':
+  jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+  colon_count = db_host.count(':')
+  if colon_count == 2 or colon_count == 0:
+    db_jdbc_url = format('jdbc:oracle:thin:@{db_host}')
+  else:
+    db_jdbc_url = format('jdbc:oracle:thin:@//{db_host}')
+  db_jdbc_driver = "oracle.jdbc.OracleDriver"
+  jdbc_dialect = "org.eclipse.persistence.platform.database.OraclePlatform"
+elif db_flavor == 'postgres':
+  jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+  db_jdbc_url = format('jdbc:postgresql://{db_host}/{db_name}')
+  db_jdbc_driver = "org.postgresql.Driver"
+  jdbc_dialect = "org.eclipse.persistence.platform.database.PostgreSQLPlatform"
+elif db_flavor == 'mssql':
+  jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+  db_jdbc_url = format('jdbc:sqlserver://{db_host};databaseName={db_name}')
+  db_jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+  jdbc_dialect = "org.eclipse.persistence.platform.database.SQLServerPlatform"
+elif db_flavor == 'sqla':
+  jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+  db_jdbc_url = format('jdbc:sqlanywhere:database={db_name};host={db_host}')
+  db_jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+  jdbc_dialect = "org.eclipse.persistence.platform.database.SQLAnywherePlatform"
+
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+driver_curl_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
+previous_jdbc_jar = format("{kms_home}/ews/webapp/lib/{previous_jdbc_jar_name}")
+ews_lib_jar_path = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
+
+if db_flavor == 'sqla':
+  downloaded_custom_connector = format("{tmp_dir}/sqla-client-jdbc.tar.gz")
+  jar_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/sajdbc4.jar")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  jdbc_libs_dir = format("{kms_home}/native/lib64")
+  ld_library_path = format("{jdbc_libs_dir}")
+
+if has_ranger_admin:
+  xa_previous_jdbc_jar_name = None
+  if stack_supports_ranger_audit_db:
+    if xa_audit_db_flavor == 'mysql':
+      jdbc_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      xa_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "com.mysql.jdbc.Driver"
+    elif xa_audit_db_flavor == 'oracle':
+      jdbc_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      xa_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      colon_count = xa_db_host.count(':')
+      if colon_count == 2 or colon_count == 0:
+        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+      else:
+        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+      jdbc_driver = "oracle.jdbc.OracleDriver"
+    elif xa_audit_db_flavor == 'postgres':
+      jdbc_jar = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      xa_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "org.postgresql.Driver"
+    elif xa_audit_db_flavor == 'mssql':
+      jdbc_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      xa_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+    elif xa_audit_db_flavor == 'sqla':
+      jdbc_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+      xa_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  downloaded_connector_path = format("{tmp_dir}/{jdbc_jar}") if stack_supports_ranger_audit_db else None
+  driver_source = format("{jdk_location}/{jdbc_jar}") if stack_supports_ranger_audit_db else None
+  driver_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar}") if stack_supports_ranger_audit_db else None
+  xa_previous_jdbc_jar = format("{kms_home}/ews/webapp/lib/{xa_previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+
+repo_config_username = config['configurations']['kms-properties']['REPOSITORY_CONFIG_USERNAME']
+repo_config_password = unicode(config['configurations']['kms-properties']['REPOSITORY_CONFIG_PASSWORD'])
+
+kms_plugin_config = {
+  'username' : repo_config_username,
+  'password' : repo_config_password,
+  'provider' : format('kms://http@{kms_host}:{kms_port}/kms') 
+}
+
+xa_audit_db_is_enabled = False
+if stack_supports_ranger_audit_db:
+  xa_audit_db_is_enabled = config['configurations']['ranger-kms-audit']['xasecure.audit.destination.db']
+ssl_keystore_password = unicode(config['configurations']['ranger-kms-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'])
+ssl_truststore_password = unicode(config['configurations']['ranger-kms-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'])
+
+# For SQLA, explicitly disable audit to DB for Ranger
+if xa_audit_db_flavor == 'sqla':
+  xa_audit_db_is_enabled = False
+
+current_host = config['hostname']
+ranger_kms_hosts = config['clusterHostInfo']['ranger_kms_server_hosts']
+if current_host in ranger_kms_hosts:
+  kms_host = current_host
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+ranger_kms_jdbc_connection_url = config['configurations']['dbks-site']['ranger.ks.jpa.jdbc.url']
+ranger_kms_jdbc_driver = config['configurations']['dbks-site']['ranger.ks.jpa.jdbc.driver']
+
+jce_name = default("/hostLevelParams/jce_name", None)
+jce_source_dir = format('{tmp_dir}/jce_dir')
+
+# kms hsm support
+enable_kms_hsm = default("/configurations/dbks-site/ranger.ks.hsm.enabled", False)
+hms_partition_alias = default("/configurations/dbks-site/ranger.ks.hsm.partition.password.alias", "ranger.kms.hsm.partition.password")
+hms_partition_passwd = default("/configurations/kms-env/hsm_partition_password", None)
+
+# kms kerberos from stack 2.5 onward
+rangerkms_bare_principal = 'rangerkms'
+
+if stack_supports_ranger_kerberos:
+  if security_enabled:
+    rangerkms_principal = config['configurations']['dbks-site']['ranger.ks.kerberos.principal']
+    rangerkms_keytab = config['configurations']['dbks-site']['ranger.ks.kerberos.keytab']
+    if not is_empty(rangerkms_principal) and rangerkms_principal != '':
+      rangerkms_bare_principal = get_bare_principal(rangerkms_principal)
+      rangerkms_principal = rangerkms_principal.replace('_HOST', kms_host.lower())
+  kms_plugin_config['policy.download.auth.users'] = format('keyadmin,{rangerkms_bare_principal}')
+
+custom_ranger_service_config = generate_ranger_service_config(config['configurations']['kms-properties'])
+if len(custom_ranger_service_config) > 0:
+  kms_plugin_config.update(custom_ranger_service_config)
+
+kms_ranger_plugin_repo = {
+  'isEnabled' : 'true',
+  'configs' : kms_plugin_config,
+  'description' : 'kms repo',
+  'name' : repo_name,
+  'type' : 'kms'
+}
+
+# ranger kms pid
+user_group = config['configurations']['cluster-env']['user_group']
+ranger_kms_pid_dir = default("/configurations/kms-env/ranger_kms_pid_dir", "/var/run/ranger_kms")
+ranger_kms_pid_file = format('{ranger_kms_pid_dir}/rangerkms.pid')
+
+if security_enabled:
+  spengo_keytab = config['configurations']['kms-site']['hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab']
+  spnego_principal = config['configurations']['kms-site']['hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal']
+  spnego_principal = spnego_principal.replace('_HOST', current_host.lower())
+
+plugin_audit_password_property = 'xasecure.audit.destination.db.password'
+kms_plugin_password_properties = ['xasecure.policymgr.clientssl.keystore.password', 'xasecure.policymgr.clientssl.truststore.password']
+dbks_site_password_properties = ['ranger.db.encrypt.key.password', 'ranger.ks.jpa.jdbc.password', 'ranger.ks.hsm.partition.password']
+ranger_kms_site_password_properties = ['ranger.service.https.attrib.keystore.pass']
+ranger_kms_cred_ssl_path = config['configurations']['ranger-kms-site']['ranger.credential.provider.path']
+ranger_kms_ssl_keystore_alias = config['configurations']['ranger-kms-site']['ranger.service.https.attrib.keystore.credential.alias']
+ranger_kms_ssl_passwd = config['configurations']['ranger-kms-site']['ranger.service.https.attrib.keystore.pass']
+ranger_kms_ssl_enabled = config['configurations']['ranger-kms-site']['ranger.service.https.attrib.ssl.enabled']
+
+xa_audit_hdfs_is_enabled = default("/configurations/ranger-kms-audit/xasecure.audit.destination.hdfs", False)
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+
+# need this to capture cluster name from where ranger kms plugin is enabled
+cluster_name = config['clusterName']
+
+has_namenode = len(namenode_host) > 0
+
+hdfs_user = default("/configurations/hadoop-env/hdfs_user", None)
+hdfs_user_keytab = default("/configurations/hadoop-env/hdfs_user_keytab", None)
+hdfs_principal_name = default("/configurations/hadoop-env/hdfs_principal_name", None)
+default_fs = default("/configurations/core-site/fs.defaultFS", None)
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+import functools
+# Create partial functions that pre-bind the arguments common to every
+# HdfsResource call; code that creates/deletes HDFS directories/files or
+# copies from local then invokes params.HdfsResource with only the varying args.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+
+local_component_list = default("/localComponents", [])
+has_hdfs_client_on_node = 'HDFS_CLIENT' in local_component_list
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..84e4e73
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.core.exceptions import ComponentIsNotRunning
+
+
+class KmsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    cmd = 'ps -ef | grep proc_rangerkms | grep -v grep'
+    code, output = shell.call(cmd, timeout=20)
+    if code == 0:
+      Logger.info('KMS process up and running')
+    else:
+      Logger.debug('KMS process not running')
+      raise ComponentIsNotRunning()
+
+if __name__ == "__main__":
+  KmsServiceCheck().execute()
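The `ps -ef | grep ... | grep -v grep` pipeline above exits 0 only when a process other than the grep itself matches the marker, which is what makes `shell.call` usable as a liveness probe here. A minimal standalone sketch of the same idiom, using plain subprocess rather than the resource_management stack:

import subprocess

def process_running(marker):
    # "grep -v grep" excludes the grep command itself, so the pipeline's
    # exit code is 0 only when some other process matches the marker.
    with open("/dev/null", "w") as devnull:
        cmd = "ps -ef | grep %s | grep -v grep" % marker
        return subprocess.call(cmd, shell=True, stdout=devnull) == 0

print(process_running("proc_rangerkms"))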
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..34d0082
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+config  = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_supports_pid = stack_version_formatted and check_stack_feature(StackFeature.RANGER_KMS_PID_SUPPORT, stack_version_formatted)
+ranger_kms_pid_dir = default("/configurations/kms-env/ranger_kms_pid_dir", "/var/run/ranger_kms")
+ranger_kms_pid_file = format('{ranger_kms_pid_dir}/rangerkms.pid')
\ No newline at end of file
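status_params only computes the pid-file location; on stacks where `stack_supports_pid` holds, a status handler can reduce to a liveness check against that pid. A self-contained sketch of such a check (os.kill with signal 0; the real handler would go through resource_management helpers):

import os

def pid_alive(pid_file):
    # Signal 0 performs existence/permission checks without delivering a signal.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (IOError, ValueError, OSError):
        return False

print(pid_alive("/var/run/ranger_kms/rangerkms.pid"))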
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..8478bb8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+
+def prestart(env, stack_component):
+  import params
+
+  if params.version and params.stack_supports_config_versioning:
+    conf_select.select(params.stack_name, stack_component, params.version)
+    stack_select.select(stack_component, params.version)
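prestart is a helper meant to be invoked from a component's upgrade hook. A hypothetical caller would look roughly like the sketch below; the class name and the "ranger-kms" component string are assumptions for illustration, not part of this patch:

from resource_management.libraries.script import Script
import upgrade

class KmsServer(Script):
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        # Point conf/stack symlinks at the target version before restart.
        upgrade.prestart(env, "ranger-kms")  # component name is an assumption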
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/templates/input.config-ranger-kms.json.j2 b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/templates/input.config-ranger-kms.json.j2
new file mode 100644
index 0000000..306fade
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/package/templates/input.config-ranger-kms.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"ranger_kms",
+      "rowtype":"service",
+      "path":"{{default('/configurations/kms-env/kms_log_dir', '/var/log/ranger/kms')}}/kms.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "ranger_kms"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{1} - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
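The `message_pattern` above is grok; Logfeeder expands names like TIMESTAMP_ISO8601 into regular expressions at runtime. A rough Python approximation, useful for checking that a sample kms.log line parses the way the filter expects (the expansions here are simplified, not the exact grok definitions):

import re

# Simplified expansions of TIMESTAMP_ISO8601, LOGLEVEL and JAVACLASS.
pattern = re.compile(
    r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+(?P<logger_name>[\w.$]+)\s+-\s+(?P<log_message>.*)",
    re.DOTALL)

m = pattern.match("2017-06-01 12:00:00,123 INFO  KMSWebApp - KMS started")
print(m.groupdict() if m else "no match")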
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/role_command_order.json
new file mode 100644
index 0000000..7ddab41
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for RANGER-KMS",
+    "RANGER_KMS_SERVER-START" : ["RANGER_ADMIN-START", "NAMENODE-START"],
+    "RANGER_KMS_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_KMS_SERVER-START"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/service_advisor.py
new file mode 100644
index 0000000..9c33218
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/service_advisor.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class RangerKMSServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerKMSServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps the number of hosts to the host index
+    where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = RangerKMSRecommender()
+    recommender.recommendRangerKMSConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendRangerKMSConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = RangerKMSValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class RangerKMSRecommender(service_advisor.ServiceAdvisor):
+  """
+  RangerKMS Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerKMSRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendRangerKMSConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putRangerKmsDbksProperty = self.putProperty(configurations, "dbks-site", services)
+    putRangerKmsProperty = self.putProperty(configurations, "kms-properties", services)
+    kmsEnvProperties = self.getSiteProperties(services['configurations'], 'kms-env')
+    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
+    putRangerKmsAuditProperty = self.putProperty(configurations, "ranger-kms-audit", services)
+    security_enabled = self.isSecurityEnabled(services)
+    putRangerKmsSiteProperty = self.putProperty(configurations, "kms-site", services)
+    putRangerKmsSitePropertyAttribute = self.putPropertyAttribute(configurations, "kms-site")
+
+    if 'kms-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['kms-properties']['properties']):
+
+      rangerKmsDbFlavor = services['configurations']["kms-properties"]["properties"]["DB_FLAVOR"]
+
+      if ('db_host' in services['configurations']['kms-properties']['properties']) and ('db_name' in services['configurations']['kms-properties']['properties']):
+
+        rangerKmsDbHost = services['configurations']["kms-properties"]["properties"]["db_host"]
+        rangerKmsDbName = services['configurations']["kms-properties"]["properties"]["db_name"]
+
+        ranger_kms_db_url_dict = {
+          'MYSQL': {'ranger.ks.jpa.jdbc.driver': 'com.mysql.jdbc.Driver',
+                    'ranger.ks.jpa.jdbc.url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + '/' + rangerKmsDbName},
+          'ORACLE': {'ranger.ks.jpa.jdbc.driver': 'oracle.jdbc.driver.OracleDriver',
+                     'ranger.ks.jpa.jdbc.url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost, rangerKmsDbName)},
+          'POSTGRES': {'ranger.ks.jpa.jdbc.driver': 'org.postgresql.Driver',
+                       'ranger.ks.jpa.jdbc.url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + '/' + rangerKmsDbName},
+          'MSSQL': {'ranger.ks.jpa.jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
+                    'ranger.ks.jpa.jdbc.url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + ';databaseName=' + rangerKmsDbName},
+          'SQLA': {'ranger.ks.jpa.jdbc.driver': 'sap.jdbc4.sqlanywhere.IDriver',
+                   'ranger.ks.jpa.jdbc.url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + ';database=' + rangerKmsDbName}
+        }
+
+        rangerKmsDbProperties = ranger_kms_db_url_dict.get(rangerKmsDbFlavor, ranger_kms_db_url_dict['MYSQL'])
+        for key in rangerKmsDbProperties:
+          putRangerKmsDbksProperty(key, rangerKmsDbProperties.get(key))
+
+    if kmsEnvProperties and self.checkSiteProperties(kmsEnvProperties, 'kms_user') and 'KERBEROS' in servicesList:
+      kmsUser = kmsEnvProperties['kms_user']
+      kmsUserOld = self.getOldValue(services, 'kms-env', 'kms_user')
+      self.put_proxyuser_value(kmsUser, '*', is_groups=True, services=services, configurations=configurations, put_function=putCoreSiteProperty)
+      if kmsUserOld is not None and kmsUser != kmsUserOld:
+        putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(kmsUserOld), 'delete', 'true')
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUserOld)})
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUser)})
+
+    if "HDFS" in servicesList:
+      if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+        default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+        putRangerKmsAuditProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit'))
+
+    required_services = [{'service' : 'YARN', 'config-type': 'yarn-env', 'property-name': 'yarn_user', 'proxy-category': ['hosts', 'users', 'groups']},
+                         {'service' : 'SPARK', 'config-type': 'livy-env', 'property-name': 'livy_user', 'proxy-category': ['hosts', 'users', 'groups']}]
+
+    required_services_for_secure = [{'service' : 'HIVE', 'config-type': 'hive-env', 'property-name': 'hive_user', 'proxy-category': ['hosts', 'users']},
+                                    {'service' : 'OOZIE', 'config-type': 'oozie-env', 'property-name': 'oozie_user', 'proxy-category': ['hosts', 'users']}]
+
+    if security_enabled:
+      required_services.extend(required_services_for_secure)
+
+    # recommendations for kms proxy related properties
+    self.recommendKMSProxyUsers(configurations, services, hosts, required_services)
+
+    ambari_user = self.getAmbariUser(services)
+    if security_enabled:
+      # adding for ambari user
+      putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.users'.format(ambari_user), '*')
+      putRangerKmsSiteProperty('hadoop.kms.proxyuser.{0}.hosts'.format(ambari_user), '*')
+      # adding for HTTP
+      putRangerKmsSiteProperty('hadoop.kms.proxyuser.HTTP.users', '*')
+      putRangerKmsSiteProperty('hadoop.kms.proxyuser.HTTP.hosts', '*')
+    else:
+      self.deleteKMSProxyUsers(configurations, services, hosts, required_services_for_secure)
+      # deleting ambari user proxy properties
+      putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.hosts'.format(ambari_user), 'delete', 'true')
+      putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.{0}.users'.format(ambari_user), 'delete', 'true')
+      # deleting HTTP proxy properties
+      putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.HTTP.hosts', 'delete', 'true')
+      putRangerKmsSitePropertyAttribute('hadoop.kms.proxyuser.HTTP.users', 'delete', 'true')
+
+
+  def recommendRangerKMSConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+
+    security_enabled = self.isSecurityEnabled(services)
+    required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']}]
+
+    if security_enabled:
+      # recommendations for kms proxy related properties
+      self.recommendKMSProxyUsers(configurations, services, hosts, required_services)
+    else:
+      self.deleteKMSProxyUsers(configurations, services, hosts, required_services)
+
+
+
+  def recommendRangerKMSConfigurations(self, configurations, clusterData, services, hosts):
+    putRangerKmsEnvProperty = self.putProperty(configurations, "kms-env", services)
+
+    ranger_kms_ssl_enabled = False
+    ranger_kms_ssl_port = "9393"
+    if 'ranger-kms-site' in services['configurations'] and 'ranger.service.https.attrib.ssl.enabled' in services['configurations']['ranger-kms-site']['properties']:
+      ranger_kms_ssl_enabled = services['configurations']['ranger-kms-site']['properties']['ranger.service.https.attrib.ssl.enabled'].lower() == "true"
+
+    if 'ranger-kms-site' in services['configurations'] and 'ranger.service.https.port' in services['configurations']['ranger-kms-site']['properties']:
+      ranger_kms_ssl_port = services['configurations']['ranger-kms-site']['properties']['ranger.service.https.port']
+
+    if ranger_kms_ssl_enabled:
+      putRangerKmsEnvProperty("kms_port", ranger_kms_ssl_port)
+    else:
+      putRangerKmsEnvProperty("kms_port", "9292")
+
+
+
+class RangerKMSValidator(service_advisor.ServiceAdvisor):
+  """
+  RangerKMS Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(RangerKMSValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = []
+
+
+
+
+
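One detail of recommendRangerKMSConfigurationsFromHDP23 worth calling out: the flavor lookup falls back to the MYSQL entry, so an unrecognized DB_FLAVOR silently receives MySQL driver and URL recommendations. A minimal sketch of that dispatch, with the templates trimmed to two flavors:

url_by_flavor = {
    'MYSQL': ('com.mysql.jdbc.Driver', 'jdbc:mysql://{host}/{db}'),
    'POSTGRES': ('org.postgresql.Driver', 'jdbc:postgresql://{host}/{db}'),
}

def jdbc_settings(flavor, host, db):
    # dict.get with a default mirrors ranger_kms_db_url_dict.get(..., ...['MYSQL'])
    driver, template = url_by_flavor.get(flavor.upper(), url_by_flavor['MYSQL'])
    return driver, template.format(host=host, db=db)

print(jdbc_settings('postgres', 'db1.example.com:5432', 'rangerkms'))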
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_1.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_1.json
new file mode 100644
index 0000000..c08a56c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_1.json
@@ -0,0 +1,303 @@
+{
+  "name": "default",
+  "description": "Default theme for Ranger KMS service",
+  "configuration": {
+    "layouts": [
+    {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "db_settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-db-settings",
+                  "display-name": "",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "4",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "4",
+                  "subsections": [
+                    {
+                      "name": "subsection-kms-db-row1-col1",
+                      "display-name": "Ranger KMS DB",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-kms-db-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-kms-create-db-user-row2-col",
+                      "display-name": "Setup Database and Database User",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "2"
+                    },
+                    {
+                      "name": "subsection-kms-db-root-user-row3-col1",
+                      "display-name": "Ranger KMS Root DB",
+                      "row-index": "2",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "kms-env/create_db_user"
+                          ],
+                          "if": "${kms-env/create_db_user}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    },
+                    {
+                      "name": "subsection-kms-db-root-user-row3-col2",
+                      "row-index": "2",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs":[
+                            "kms-env/create_db_user"
+                          ],
+                          "if": "${kms-env/create_db_user}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    },
+                    {
+                      "name": "subsection-kms-master-row4-col",
+                      "display-name": "KMS Master Secret Password",
+                      "row-index": "3",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "2"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "kms-properties/DB_FLAVOR",
+          "subsection-name": "subsection-kms-db-row1-col1"
+        },
+        {
+          "config": "kms-properties/db_name",
+          "subsection-name": "subsection-kms-db-row1-col1"
+        },
+        {
+          "config": "dbks-site/ranger.ks.jpa.jdbc.url",
+          "subsection-name": "subsection-kms-db-row1-col1"
+        },
+        {
+          "config": "kms-properties/db_user",
+          "subsection-name": "subsection-kms-db-row1-col1"
+        },
+        {
+          "config": "kms-properties/db_host",
+          "subsection-name": "subsection-kms-db-row1-col2"
+        },
+        {
+          "config": "kms-properties/SQL_CONNECTOR_JAR",
+          "subsection-name": "subsection-kms-db-row1-col2",
+          "depends-on" : [
+            {
+              "configs":[
+                "kms-properties/DB_FLAVOR"
+              ],
+              "if": "${kms-properties/DB_FLAVOR} === SQLA",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "dbks-site/ranger.ks.jpa.jdbc.driver",
+          "subsection-name": "subsection-kms-db-row1-col2"
+        },
+        {
+          "config": "kms-properties/db_password",
+          "subsection-name": "subsection-kms-db-row1-col2"
+        },
+        {
+          "config": "kms-properties/db_root_user",
+          "subsection-name": "subsection-kms-db-root-user-row3-col1"
+        },
+        {
+          "config": "kms-properties/db_root_password",
+          "subsection-name": "subsection-kms-db-root-user-row3-col2"
+        },
+        {
+          "config": "kms-properties/KMS_MASTER_KEY_PASSWD",
+          "subsection-name": "subsection-kms-master-row4-col"
+        },
+        {
+          "config" : "kms-env/create_db_user",
+          "subsection-name": "subsection-kms-create-db-user-row2-col"
+        },
+        {
+          "config": "kms-env/test_db_kms_connection",
+          "subsection-name": "subsection-kms-create-db-user-row2-col",
+          "property_value_attributes": {
+            "ui_only_property": true
+          },
+          "depends-on": [
+            {
+              "configs":[
+                "kms-env/create_db_user"
+              ],
+              "if": "${kms-env/create_db_user}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "kms-properties/DB_FLAVOR",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "kms-properties/db_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-properties/db_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-properties/SQL_CONNECTOR_JAR",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-properties/db_root_user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-properties/db_host",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-properties/db_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "kms-properties/db_root_password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "kms-properties/KMS_MASTER_KEY_PASSWD",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "kms-env/create_db_user",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "kms-env/test_db_kms_connection",
+        "widget": {
+          "type": "test-db-connection",
+          "display-name": "Test Connection",
+          "required-properties": {
+            "jdbc.driver.class": "dbks-site/ranger.ks.jpa.jdbc.driver",
+            "jdbc.driver.url": "dbks-site/ranger.ks.jpa.jdbc.url",
+            "db.connection.source.host": "ranger_kms-site/ranger_kms_server_hosts",
+            "db.type": "kms-properties/DB_FLAVOR",
+            "db.connection.destination.host": "kms-properties/db_host",
+            "db.connection.user": "kms-properties/db_user",
+            "db.connection.password": "kms-properties/db_password"
+          }
+        }
+      },
+      {
+        "config": "dbks-site/ranger.ks.jpa.jdbc.driver",
+        "widget" : {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "dbks-site/ranger.ks.jpa.jdbc.url",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
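The recurring `depends-on` stanzas above all encode the same rule: a subsection or widget is visible only while the boolean config in `if` evaluates true, and for SQL_CONNECTOR_JAR only while DB_FLAVOR is not SQLA. A tiny sketch of those semantics (Ambari Web's actual evaluator is more general):

def subsection_visible(create_db_user):
    # "if": "${kms-env/create_db_user}" -> shown only while the toggle is on
    return bool(create_db_user)

def connector_jar_visible(db_flavor):
    # "if": "${kms-properties/DB_FLAVOR} === SQLA" flips visibility off for SQLA
    return db_flavor != 'SQLA'

print(subsection_visible(True), connector_jar_visible('MYSQL'), connector_jar_visible('SQLA'))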
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_2.json b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_2.json
new file mode 100644
index 0000000..be50dad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.3.0/themes/theme_version_2.json
@@ -0,0 +1,124 @@
+{
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "kms_hsm",
+            "display-name": "KMS HSM",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-kms-hms",
+                  "display-name": "",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "subsection-kms-hsm-row1-col1",
+                      "display-name": "Ranger KMS HSM Enabled",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-kms-hsm-row2-col1",
+                      "display-name": "Configuration Settings",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1",
+                      "depends-on": [
+                        {
+                          "configs": [
+                            "dbks-site/ranger.ks.hsm.enabled"
+                          ],
+                          "if": "${dbks-site/ranger.ks.hsm.enabled}",
+                          "then": {
+                            "property_value_attributes": {
+                              "visible": true
+                            }
+                          },
+                          "else": {
+                            "property_value_attributes": {
+                              "visible": false
+                            }
+                          }
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "dbks-site/ranger.ks.hsm.enabled",
+          "subsection-name": "subsection-kms-hsm-row1-col1"
+        },
+        {
+          "config": "dbks-site/ranger.ks.hsm.type",
+          "subsection-name": "subsection-kms-hsm-row2-col1"
+        },
+        {
+          "config": "dbks-site/ranger.ks.hsm.partition.name",
+          "subsection-name": "subsection-kms-hsm-row2-col1"
+        },
+        {
+          "config": "dbks-site/ranger.ks.hsm.partition.password.alias",
+          "subsection-name": "subsection-kms-hsm-row2-col1"
+        },
+        {
+          "config": "kms-env/hsm_partition_password",
+          "subsection-name": "subsection-kms-hsm-row2-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "dbks-site/ranger.ks.hsm.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "dbks-site/ranger.ks.hsm.type",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "dbks-site/ranger.ks.hsm.partition.name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "dbks-site/ranger.ks.hsm.partition.password.alias",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "kms-env/hsm_partition_password",
+        "widget": {
+          "type": "password"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
index 0e38f16..5035ecf 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/alerts.json
@@ -27,6 +27,54 @@
           }
         }
       }
+    ],
+    "LIVY_SERVER": [
+      {
+        "name": "livy_server_status",
+        "label": "Spark Livy Server",
+        "description": "This host-level alert is triggered if the Livy Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "SCRIPT",
+          "path": "SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "SPARK_THRIFTSERVER": [
+      {
+        "name": "spark_thriftserver_status",
+        "label": "Spark Thrift Server",
+        "description": "This host-level alert is triggered if the Spark Thrift Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "SCRIPT",
+          "path": "SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
     ]
   }
 }
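Both alert definitions pass a `check.command.timeout` parameter to their SCRIPT source; inside the alert scripts it arrives through the `parameters` dict of execute(). A sketch of the usual read (the key and default match the constants declared in the alert scripts below):

CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0

def get_check_timeout(parameters):
    # Fall back to the default when the parameter was not customized.
    return float(parameters.get(CHECK_COMMAND_TIMEOUT_KEY, CHECK_COMMAND_TIMEOUT_DEFAULT))

print(get_check_timeout({'check.command.timeout': '120'}))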
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-logsearch-conf.xml
deleted file mode 100644
index 63201ef..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-logsearch-conf.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Spark</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>SPARK_JOBHISTORYSERVER:spark_jobhistory_server;SPARK_THRIFTSERVER:spark_thriftserver;LIVY_SERVER:livy_server</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-   "input":[
-      {
-       "type":"spark_jobhistory_server",
-       "rowtype":"service",
-       "path":"{{default('/configurations/spark-env/spark_log_dir', '/var/log/spark')}}/spark-*-org.apache.spark.deploy.history.HistoryServer*.out"
-     },
-     {
-       "type":"spark_thriftserver",
-       "rowtype":"service",
-       "path":"{{default('/configurations/spark-env/spark_log_dir', '/var/log/spark')}}/spark-*-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2*.out"
-     },
-     {
-       "type":"livy_server",
-       "rowtype":"service",
-       "path":"{{default('/configurations/livy-env/livy_log_dir', '/var/log/livy')}}/livy-livy-server.out"
-     }
-   ],
-   "filter":[
-       {
-          "filter":"grok",
-          "conditions":{
-            "fields":{
-              "type":[
-                "spark_jobhistory_server",
-                "spark_thriftserver",
-                "livy_server"
-              ]
-             }
-          },
-          "log4j_format":"",
-          "multiline_pattern":"^(%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level})",
-          "message_pattern":"(?m)^%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVAFILE:file}:%{SPACE}%{GREEDYDATA:log_message}",
-          "post_map_values":{
-            "logtime":{
-              "map_date":{
-                "target_date_pattern":"yy/MM/dd HH:mm:ss"
-              }
-             },
-            "level":{
-              "map_fieldvalue":{
-                "pre_value":"WARNING",
-                "post_value":"WARN"
-              }
-             }
-           }
-      }
-   ]
-}
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
new file mode 100644
index 0000000..f3a63b2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
@@ -0,0 +1,148 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import logging
+import traceback
+import socket
+from resource_management import *
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources import Execute
+from resource_management.core.logger import Logger
+from resource_management.core import global_lock
+from resource_management.libraries.functions import get_kinit_path
+
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+logger = logging.getLogger('ambari_alerts')
+
+LIVY_SERVER_PORT_KEY = '{{livy-conf/livy.server.port}}'
+
+LIVYUSER_DEFAULT = 'livy'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+LIVY_SSL_ENABLED_KEY = '{{livy-conf/livy.keystore}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+    """
+    Returns a tuple of tokens in the format {{site/property}} that will be used
+    to build the dictionary passed into execute
+    """
+    return (LIVY_SERVER_PORT_KEY,LIVYUSER_DEFAULT,SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,SMOKEUSER_KEY,LIVY_SSL_ENABLED_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+    """
+    Returns a tuple containing the result code and a pre-formatted result label
+
+    Keyword arguments:
+    configurations (dictionary): a mapping of configuration key to value
+    parameters (dictionary): a mapping of script parameter key to value
+    host_name (string): the name of this host where the alert is running
+    """
+
+    if configurations is None:
+        return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+    LIVY_PORT_DEFAULT = 8998
+
+    port = LIVY_PORT_DEFAULT
+    if LIVY_SERVER_PORT_KEY in configurations:
+        port = int(configurations[LIVY_SERVER_PORT_KEY])
+
+    if host_name is None:
+        host_name = socket.getfqdn()
+
+    livyuser = configurations[SMOKEUSER_KEY]
+
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    smokeuser_kerberos_keytab = None
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+        smokeuser_kerberos_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    smokeuser_principal = None
+    if SMOKEUSER_PRINCIPAL_KEY in configurations:
+        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+        smokeuser_principal = smokeuser_principal.replace('_HOST',host_name.lower())
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+        kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
+    if security_enabled:
+        kinitcmd = format("{kinit_path_local} -kt {smokeuser_kerberos_keytab} {smokeuser_principal}; ")
+        # prevent concurrent kinit
+        kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+        kinit_lock.acquire()
+        try:
+            Execute(kinitcmd, user=livyuser)
+        finally:
+            kinit_lock.release()
+
+    http_scheme = 'https' if LIVY_SSL_ENABLED_KEY in configurations else 'http'
+    result_code = None
+    try:
+        start_time = time.time()
+        try:
+            livy_livyserver_host = str(host_name)
+
+            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {http_scheme}://{livy_livyserver_host}:{port}/sessions | grep 200 ")
+
+            Execute(livy_cmd,
+                    tries=3,
+                    try_sleep=1,
+                    logoutput=True,
+                    user=livyuser
+                    )
+
+            total_time = time.time() - start_time
+            result_code = 'OK'
+            label = OK_MESSAGE.format(total_time, port)
+        except:
+            result_code = 'CRITICAL'
+            label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+    except:
+        label = traceback.format_exc()
+        result_code = 'UNKNOWN'
+
+    return (result_code, [label])
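The probe above shells out to curl with --negotiate (SPNEGO) and -k; a pure-Python equivalent for the unauthenticated case would be roughly the sketch below (urllib2 does neither SPNEGO nor relaxed TLS verification, so this only mirrors the happy path):

import urllib2

def livy_up(scheme, host, port):
    # Healthy when GET /sessions answers 200 within the timeout.
    try:
        resp = urllib2.urlopen('%s://%s:%d/sessions' % (scheme, host, int(port)), timeout=5)
        return resp.getcode() == 200
    except Exception:
        return False

print(livy_up('http', 'localhost', 8998))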
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
new file mode 100644
index 0000000..503360e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
@@ -0,0 +1,151 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from resource_management.core import global_lock
+
+stack_root = Script.get_stack_root()
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{spark-hive-site-override/hive.server2.thrift.port}}'
+HIVE_SERVER_TRANSPORT_MODE_KEY = '{{spark-hive-site-override/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER2_KERBEROS_KEYTAB = '{{hive-site/hive.server2.authentication.kerberos.keytab}}'
+HIVE_SERVER2_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10015
+HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
+
+HIVEUSER_DEFAULT = 'hive'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+    """
+    Returns a tuple of tokens in the format {{site/property}} that will be used
+    to build the dictionary passed into execute
+    """
+    return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_TRANSPORT_MODE_KEY, SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+            HIVE_SERVER2_KERBEROS_KEYTAB, HIVE_SERVER2_PRINCIPAL_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+    """
+    Returns a tuple containing the result code and a pre-formatted result label
+
+    Keyword arguments:
+    configurations (dictionary): a mapping of configuration key to value
+    parameters (dictionary): a mapping of script parameter key to value
+    host_name (string): the name of this host where the alert is running
+    """
+
+    spark_home = os.path.join(stack_root, "current", 'spark-client')
+
+    if configurations is None:
+        return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+    transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+    if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+        transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+    port = THRIFT_PORT_DEFAULT
+    if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+        port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    hive_kerberos_keytab = None
+    if HIVE_SERVER2_KERBEROS_KEYTAB in configurations:
+        hive_kerberos_keytab = configurations[HIVE_SERVER2_KERBEROS_KEYTAB]
+
+    if host_name is None:
+        host_name = socket.getfqdn()
+
+    hive_principal = None
+    if HIVE_SERVER2_PRINCIPAL_KEY in configurations:
+        hive_principal = configurations[HIVE_SERVER2_PRINCIPAL_KEY]
+        hive_principal = hive_principal.replace('_HOST',host_name.lower())
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+        kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
+    hiveruser = HIVEUSER_DEFAULT
+
+    if security_enabled:
+        kinitcmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+        # prevent concurrent kinit
+        kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+        kinit_lock.acquire()
+        try:
+            Execute(kinitcmd, user=hiveruser)
+        finally:
+            kinit_lock.release()
+
+    result_code = None
+    try:
+        # host_name was already resolved above; build the JDBC URL, appending
+        # the transportMode suffix for the configured transport
+        beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
+
+        # allow the timeout to be overridden through the script parameters
+        check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+        if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+            check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+        beeline_bin_dir = os.path.join(spark_home, "bin")
+        cmd = "! beeline -u '%s' -e '' 2>&1 | awk '{print}' | grep -i -e 'Connection refused' -e 'Invalid URL'" % \
+            (format(";".join(beeline_url)))
+
+        start_time = time.time()
+        try:
+            Execute(cmd, user=hiveruser, path=[beeline_bin_dir], timeout=check_command_timeout)
+            total_time = time.time() - start_time
+            result_code = 'OK'
+            label = OK_MESSAGE.format(total_time, port)
+        except:
+            result_code = 'CRITICAL'
+            label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+    except:
+        label = traceback.format_exc()
+        result_code = 'UNKNOWN'
+
+    return (result_code, [label])
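With the default binary transport, port 10015 and an illustrative host, the assembled check is:

    ! beeline -u 'jdbc:hive2://c6401.ambari.apache.org:10015/;transportMode=binary' -e '' 2>&1 \
        | awk '{print}' | grep -i -e 'Connection refused' -e 'Invalid URL'

The leading ! inverts grep's exit status: the command, and therefore the alert, succeeds only when neither error string shows up in beeline's output.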
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index 6a59caf..74fd76a 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -211,7 +211,7 @@
 # livy is only supported from HDP 2.5
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted):
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
   livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
   livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
   livy_log_dir = config['configurations']['livy-env']['livy_log_dir']
@@ -242,6 +242,7 @@
     livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.launch.kerberos.principal']
 
   livy_livyserver_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+  livy_http_scheme = 'https' if 'livy.keystore' in config['configurations']['livy-conf'] else 'http'
 
   # ats 1.5 properties
   entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
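The scheme toggle added above keys off the mere presence of livy.keystore rather than its value; a minimal sketch of the same idiom with a plain dict and a hypothetical keystore path:

    # presence of a keystore property is read as "SSL is configured";
    # the property's value is never inspected
    livy_conf = {'livy.server.port': '8999', 'livy.keystore': '/etc/livy/keystore.jks'}
    livy_http_scheme = 'https' if 'livy.keystore' in livy_conf else 'http'
    assert livy_http_scheme == 'https'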
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
index 9d74779..4699b2e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
@@ -43,7 +43,7 @@
       live_livyserver_host = "";
       for livyserver_host in params.livy_livyserver_hosts:
         try:
-          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livyserver_host}:{livy_livyserver_port}/sessions | grep 200"),
+          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {livy_http_scheme}://{livyserver_host}:{livy_livyserver_port}/sessions | grep 200"),
               tries=3,
               try_sleep=1,
               logoutput=True,
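This hunk sits inside a loop that probes every Livy host and remembers one that answers; distilled to its shape (stub probe in place of the real Execute call, names illustrative):

    def first_live_host(hosts, probe):
        # return the first host whose probe does not raise, else None
        for host in hosts:
            try:
                probe(host)
                return host
            except Exception:
                continue
        return None

    assert first_live_host(['h1', 'h2'], lambda h: None) == 'h1'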
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/templates/input.config-spark.json.j2 b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/templates/input.config-spark.json.j2
new file mode 100644
index 0000000..7eea751
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/templates/input.config-spark.json.j2
@@ -0,0 +1,66 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"spark_jobhistory_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark-env/spark_log_dir', '/var/log/spark')}}/spark-*-org.apache.spark.deploy.history.HistoryServer*.out"
+    },
+    {
+      "type":"spark_thriftserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark-env/spark_log_dir', '/var/log/spark')}}/spark-*-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2*.out"
+    },
+    {
+      "type":"livy_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/livy-env/livy_log_dir', '/var/log/livy')}}/livy-livy-server.out"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "spark_jobhistory_server",
+            "spark_thriftserver",
+            "livy_server"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level})",
+      "message_pattern":"(?m)^%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVAFILE:file}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yy/MM/dd HH:mm:ss"
+          }
+        },
+        "level":{
+          "map_fieldvalue":{
+            "pre_value":"WARNING",
+            "post_value":"WARN"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
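The grok pair above assumes the stock Spark log4j layout; an illustrative line such as

    17/06/01 12:34:56 INFO HistoryServer: Bound HistoryServer to 0.0.0.0

maps to logtime=17/06/01 12:34:56 (parsed with the yy/MM/dd HH:mm:ss pattern), level=INFO, file=HistoryServer and the remainder as log_message, with any WARNING level rewritten to WARN by the post_map_values rule.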
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/quicklinks/quicklinks.json
index 685665a..5557c53 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/quicklinks/quicklinks.json
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/quicklinks/quicklinks.json
@@ -12,6 +12,7 @@
         "name": "spark_history_server_ui",
         "label": "Spark History Server UI",
         "requires_user_name": "false",
+        "component_name": "SPARK_JOBHISTORYSERVER",
         "url": "%@://%@:%@",
         "port":{
           "http_property": "spark.history.ui.port",
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
index 25a6855..d2e2d8b 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/metainfo.xml
@@ -240,18 +240,19 @@
         <service>HIVE</service>
       </requiredServices>
 
+      <!-- TODO, change these to "spark" and "livy" after RPM switches the name. -->
       <osSpecifics>
         <osSpecific>
           <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
           <packages>
             <package>
-              <name>spark_${stack_version}</name>
+              <name>spark2_${stack_version}</name>
             </package>
             <package>
-              <name>spark_${stack_version}-python</name>
+              <name>spark2_${stack_version}-python</name>
             </package>
             <package>
-              <name>livy_${stack_version}</name>
+              <name>livy2_${stack_version}</name>
             </package>
           </packages>
         </osSpecific>
@@ -259,13 +260,13 @@
           <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
           <packages>
             <package>
-              <name>spark-${stack_version}</name>
+              <name>spark2-${stack_version}</name>
             </package>
             <package>
-              <name>spark-${stack_version}-python</name>
+              <name>spark2-${stack_version}-python</name>
             </package>
             <package>
-              <name>livy-${stack_version}</name>
+              <name>livy2-${stack_version}</name>
             </package>
           </packages>
         </osSpecific>
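Ambari substitutes ${stack_version} with the installed stack version mangled to the package manager's naming rules; a sketch of that convention (illustrative helper, not Ambari's own code):

    import re

    def expand(name_template, stack_version):
        # observed HDP convention: non-alphanumerics in the version collapse
        # to underscores, so 2.6.0.0-334 -> 2_6_0_0_334
        mangled = re.sub(r'[^0-9a-zA-Z]', '_', stack_version)
        return name_template.replace('${stack_version}', mangled)

    print expand('spark2_${stack_version}', '2.6.0.0-334')  # spark2_2_6_0_0_334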
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/job_history_server.py
new file mode 100644
index 0000000..3937c88
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/job_history_server.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import *
+from spark_service import spark_service
+
+
+class JobHistoryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    
+    self.install_packages(env)
+    
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    
+    setup_spark(env, 'server', upgrade_type=upgrade_type, action = 'config')
+    
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    
+    self.configure(env)
+    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    
+    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.spark_history_server_pid_file)
+    
+
+  def get_component_name(self):
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-historyserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
+      # TODO, change to "spark" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("spark2-historyserver", params.version)
+
+      # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
+      # need to copy the tarball, otherwise, copy it.
+      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
+        resource_created = copy_to_hdfs(
+          "tez",
+          params.user_group,
+          params.hdfs_user,
+          skip=params.sysprep_skip_copy_tarballs_hdfs)
+        if resource_created:
+          params.HdfsResource(None, action="execute")
+          
+  def get_log_folder(self):
+    import params
+    return params.spark_log_dir
+  
+  def get_user(self):
+    import params
+    return params.spark_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.spark_history_server_pid_file]
+
+if __name__ == "__main__":
+  JobHistoryServer().execute()
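The __main__ guard hands control to Script.execute(), which dispatches on the command name the agent passes in argv; an illustrative invocation (every path here is a placeholder for what the agent actually supplies):

    python job_history_server.py START /var/lib/ambari-agent/data/command-1.json \
        /var/lib/ambari-agent/cache/common-services/SPARK/2.2.0/package \
        /var/lib/ambari-agent/data/structured-out-1.json INFO /var/lib/ambari-agent/tmp

The command name (START, STOP, STATUS, ...) is matched case-insensitively against the methods defined on the class above.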
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/livy_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/livy_server.py
new file mode 100644
index 0000000..269c97d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/livy_server.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
+from resource_management import is_empty
+from resource_management import shell
+from resource_management.libraries.functions.decorator import retry
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import conf_select, stack_select
+
+from livy_service import livy_service
+from setup_livy import setup_livy
+
+class LivyServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+
+    setup_livy(env, 'server', upgrade_type=upgrade_type, action = 'config')
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.has_ats and params.has_livyserver:
+      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
+      self.wait_for_dfs_directories_created([params.entity_groupfs_store_dir, params.entity_groupfs_active_dir])
+
+    self.configure(env)
+    livy_service('server', upgrade_type=upgrade_type, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    livy_service('server', upgrade_type=upgrade_type, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.livy_server_pid_file)
+
+  #  TODO move out and compose with similar method in resourcemanager.py
+  def wait_for_dfs_directories_created(self, dirs):
+    import params
+
+    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {livy_kerberos_keytab} {livy_principal}"),
+              user=params.livy_user
+              )
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+              user=params.hdfs_user
+              )
+
+    for dir_path in dirs:
+        self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.livy_server_pid_file]
+
+
+  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
+  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
+    import params
+
+    if not is_empty(dir_path):
+      dir_path = HdfsResourceProvider.parse_path(dir_path)
+
+      if dir_path in ignored_dfs_dirs:
+        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
+        return
+
+      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
+
+      dir_exists = None
+
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # check with webhdfs is much faster than executing hdfs dfs -test
+        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
+        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do time expensive hdfs dfs -d check.
+        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.livy_user)[0]
+        dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
+
+      if not dir_exists:
+        raise Fail("DFS directory '" + dir_path + "' does not exist !")
+      else:
+        Logger.info("DFS directory '" + dir_path + "' exists.")
+
+  def get_component_name(self):
+    # TODO, change to "livy" after RPM switches the name
+    return "livy2-server"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing Livy Server Stack Upgrade pre-restart")
+      # TODO, change to "spark" and "livy" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("livy2-server", params.version)
+
+  def get_log_folder(self):
+    import params
+    return params.livy_log_dir
+
+  def get_user(self):
+    import params
+    return params.livy_user
+
+if __name__ == "__main__":
+  LivyServer().execute()
+
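wait_for_dfs_directory_created leans on the library's @retry decorator for its eight attempts; a minimal stand-in showing what such a decorator does (simplified sketch, not resource_management's implementation):

    import time

    def retry(times, sleep_time, backoff_factor, err_class):
        def decorator(fn):
            def wrapper(*args, **kwargs):
                delay = sleep_time
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise  # attempts exhausted: propagate the failure
                        time.sleep(delay)
                        delay *= backoff_factor
            return wrapper
        return decorator

With backoff_factor=1, as used above, the sleep stays a flat 20 seconds between attempts.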
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_service.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/livy_service.py
similarity index 100%
rename from ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_service.py
rename to ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/livy_service.py
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/params.py
new file mode 100644
index 0000000..1d36a75
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/params.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import socket
+
+import status_params
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+# TODO, change to "spark" and "livy" after RPM switches the name
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SPARK_JOBHISTORYSERVER' : 'spark2-historyserver',
+  'SPARK_CLIENT' : 'spark2-client',
+  'SPARK_THRIFTSERVER' : 'spark2-thriftserver',
+  'LIVY_SERVER' : 'livy2-server',
+  'LIVY_CLIENT' : 'livy2-client'
+
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = status_params.stack_name
+stack_root = Script.get_stack_root()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
+version = default("/commandParams/version", None)
+
+# TODO, change to "spark" after RPM switches the name
+spark_conf = '/etc/spark2/conf'
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  hadoop_home = stack_select.get_hadoop_dir("home")
+  spark_conf = format("{stack_root}/current/{component_directory}/conf")
+  spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
+  spark_pid_dir = status_params.spark_pid_dir
+  spark_home = format("{stack_root}/current/{component_directory}")
+
+spark_daemon_memory = config['configurations']['spark-env']['spark_daemon_memory']
+spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
+java_home = config['hostLevelParams']['java_home']
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+user_group = config['configurations']['cluster-env']['user_group']
+
+spark_user = status_params.spark_user
+hive_user = status_params.hive_user
+spark_group = status_params.spark_group
+user_group = status_params.user_group
+spark_hdfs_user_dir = format("/user/{spark_user}")
+spark_history_dir = default('/configurations/spark-defaults/spark.history.fs.logDirectory', "hdfs:///spark-history")
+
+spark_history_server_pid_file = status_params.spark_history_server_pid_file
+spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
+
+spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
+spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
+
+spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
+spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
+spark_hadoop_lib_native = format("{stack_root}/current/hadoop-client/lib/native:{stack_root}/current/hadoop-client/lib/native/Linux-amd64-64")
+
+run_example_cmd = format("{spark_home}/bin/run-example")
+spark_smoke_example = "SparkPi"
+spark_service_check_cmd = format(
+  "{run_example_cmd} --master yarn --deploy-mode cluster --num-executors 1 --driver-memory 256m --executor-memory 256m --executor-cores 1 {spark_smoke_example} 1")
+
+spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
+
+if len(spark_jobhistoryserver_hosts) > 0:
+  spark_history_server_host = spark_jobhistoryserver_hosts[0]
+else:
+  spark_history_server_host = "localhost"
+
+# spark-defaults params
+ui_ssl_enabled = default("configurations/spark-defaults/spark.ssl.enabled", False)
+
+spark_yarn_historyServer_address = spark_history_server_host  # already defaults to "localhost" above
+spark_history_scheme = "http"
+spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']
+
+if ui_ssl_enabled:
+  spark_history_ui_port = str(int(spark_history_ui_port) + 400)
+  spark_history_scheme = "https"
+
+
+spark_env_sh = config['configurations']['spark-env']['content']
+spark_log4j_properties = config['configurations']['spark-log4j-properties']['content']
+spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
+
+hive_server_host = default("/clusterHostInfo/hive_server_host", [])
+is_hive_installed = not len(hive_server_host) == 0
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+spark_kerberos_keytab =  config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
+spark_kerberos_principal =  config['configurations']['spark-defaults']['spark.history.kerberos.principal']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+
+spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
+has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0
+
+# hive-site params
+spark_hive_properties = {
+  'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
+}
+
+# security settings
+if security_enabled:
+  spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
+
+  if is_hive_installed:
+    spark_hive_properties.update({
+      'hive.metastore.sasl.enabled': str(config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower(),
+      'hive.metastore.kerberos.keytab.file': config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'],
+      'hive.server2.authentication.spnego.principal': config['configurations']['hive-site']['hive.server2.authentication.spnego.principal'],
+      'hive.server2.authentication.spnego.keytab': config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab'],
+      'hive.metastore.kerberos.principal': config['configurations']['hive-site']['hive.metastore.kerberos.principal'],
+      'hive.server2.authentication.kerberos.principal': config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'],
+      'hive.server2.authentication.kerberos.keytab': config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab'],
+      'hive.server2.authentication': config['configurations']['hive-site']['hive.server2.authentication'],
+    })
+
+    hive_kerberos_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+    hive_kerberos_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
+
+# thrift server support - available on HDP 2.3 or higher
+spark_thrift_sparkconf = None
+spark_thrift_cmd_opts_properties = ''
+spark_thrift_fairscheduler_content = None
+spark_thrift_master = "yarn-client"
+if 'nm_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['nm_hosts']) == 1:
+  # use local mode when there's only one nodemanager
+  spark_thrift_master = "local[4]"
+
+if has_spark_thriftserver and 'spark-thrift-sparkconf' in config['configurations']:
+  spark_thrift_sparkconf = config['configurations']['spark-thrift-sparkconf']
+  spark_thrift_cmd_opts_properties = config['configurations']['spark-env']['spark_thrift_cmd_opts']
+  if is_hive_installed:
+    # update default metastore client properties (async wait for metastore component) it is useful in case of
+    # blueprint provisioning when hive-metastore and spark-thriftserver is not on the same host.
+    spark_hive_properties.update({
+      'hive.metastore.client.socket.timeout' : config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
+    })
+    spark_hive_properties.update(config['configurations']['spark-hive-site-override'])
+
+  if 'spark-thrift-fairscheduler' in config['configurations'] and 'fairscheduler_content' in config['configurations']['spark-thrift-fairscheduler']:
+    spark_thrift_fairscheduler_content = config['configurations']['spark-thrift-fairscheduler']['fairscheduler_content']
+
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+hdfs_site = config['configurations']['hdfs-site']
+hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
+
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = len(ats_host) > 0
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+# livy related config
+
+# livy for spark2 is only supported from HDP 2.6
+has_livyserver = False
+
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and "livy-env" in config['configurations']:
+  livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
+  livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
+  livy_log_dir = config['configurations']['livy-env']['livy_log_dir']
+  livy_pid_dir = status_params.livy_pid_dir
+  livy_home = format("{stack_root}/current/{livy_component_directory}")
+  livy_user = status_params.livy_user
+  livy_group = status_params.livy_group
+  user_group = status_params.user_group
+  livy_hdfs_user_dir = format("/user/{livy_user}")
+  livy_server_pid_file = status_params.livy_server_pid_file
+  livy_recovery_dir = default("/configurations/livy-conf/livy.server.recovery.state-store.url", "/livy-recovery")
+
+  livy_server_start = format("{livy_home}/bin/livy-server start")
+  livy_server_stop = format("{livy_home}/bin/livy-server stop")
+  livy_logs_dir = format("{livy_home}/logs")
+
+  livy_env_sh = config['configurations']['livy-env']['content']
+  livy_log4j_properties = config['configurations']['livy-log4j-properties']['content']
+  livy_spark_blacklist_properties = config['configurations']['livy-spark-blacklist']['content']
+
+  if 'livy.server.kerberos.keytab' in config['configurations']['livy-conf']:
+    livy_kerberos_keytab =  config['configurations']['livy-conf']['livy.server.kerberos.keytab']
+  else:
+    livy_kerberos_keytab =  config['configurations']['livy-conf']['livy.server.launch.kerberos.keytab']
+  if 'livy.server.kerberos.principal' in config['configurations']['livy-conf']:
+    livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.kerberos.principal']
+  else:
+    livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.launch.kerberos.principal']
+
+  livy_livyserver_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+
+  # ats 1.5 properties
+  entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
+  entity_groupfs_active_dir_mode = 01777
+  entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
+  entity_groupfs_store_dir_mode = 0700
+  is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
+
+  if len(livy_livyserver_hosts) > 0:
+    has_livyserver = True
+    if security_enabled:
+      livy_principal = livy_kerberos_principal.replace('_HOST', config['hostname'].lower())
+
+  livy_livyserver_port = default('/configurations/livy-conf/livy.server.port', 8999)
+
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+
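The functools.partial at the bottom pre-binds every cluster-wide argument so call sites such as params.HdfsResource(params.spark_history_dir, ...) only name what varies per call; a toy equivalent with a stand-in resource function:

    import functools

    def hdfs_resource(path, user=None, security_enabled=False, action=None):
        # stand-in for the real HdfsResource; only the call shape matters here
        return (path, user, security_enabled, action)

    HdfsResource = functools.partial(hdfs_resource, user='hdfs', security_enabled=True)
    print HdfsResource('/spark-history', action='create_on_execute')
    # -> ('/spark-history', 'hdfs', True, 'create_on_execute')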
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/service_check.py
similarity index 100%
rename from ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/service_check.py
rename to ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/service_check.py
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/setup_livy.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/setup_livy.py
similarity index 100%
rename from ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/setup_livy.py
rename to ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/setup_livy.py
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/setup_spark.py
similarity index 100%
rename from ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/setup_spark.py
rename to ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/setup_spark.py
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_client.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_client.py
new file mode 100644
index 0000000..3acde4e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_client.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import setup_spark
+
+
+class SparkClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    
+    setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+  
+  def get_component_name(self):
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
+      # TODO, change to "spark" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("spark2-client", params.version)
+
+if __name__ == "__main__":
+  SparkClient().execute()
+
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_service.py
new file mode 100644
index 0000000..536d798
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_service.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import socket
+import tarfile
+import os
+from contextlib import closing
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs, get_tarball_paths
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.show_logs import show_logs
+
+
+def make_tarfile(output_filename, source_dir):
+  try:
+    os.remove(output_filename)
+  except OSError:
+    pass
+  parent_dir=os.path.dirname(output_filename)
+  if not os.path.exists(parent_dir):
+    os.makedirs(parent_dir)
+  os.chmod(parent_dir, 0711)
+  with closing(tarfile.open(output_filename, "w:gz")) as tar:
+    for entry in os.listdir(source_dir):  # 'entry', to avoid shadowing the file builtin
+      tar.add(os.path.join(source_dir, entry), arcname=entry)
+  os.chmod(output_filename, 0644)
+
+
+def spark_service(name, upgrade_type=None, action=None):
+  import params
+
+  if action == 'start':
+
+    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
+    if effective_version:
+      effective_version = format_stack_version(effective_version)
+
+    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
+      # TODO, change to "spark" after RPM switches the name
+      # create & copy spark2-hdp-yarn-archive.tar.gz to hdfs
+      if not params.sysprep_skip_copy_tarballs_hdfs:
+          source_dir=params.spark_home+"/jars"
+          tmp_archive_file=get_tarball_paths("spark2")[1]
+          make_tarfile(tmp_archive_file, source_dir)
+          copy_to_hdfs("spark2", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs, replace_existing_files=True)
+      # create spark history directory
+      params.HdfsResource(params.spark_history_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.spark_user,
+                          group=params.user_group,
+                          mode=0777,
+                          recursive_chmod=True
+                          )
+      params.HdfsResource(None, action="execute")
+
+    if params.security_enabled:
+      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
+      Execute(spark_kinit_cmd, user=params.spark_user)
+
+    # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
+    # need to copy the tarball, otherwise, copy it.
+    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
+      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+
+    if name == 'jobhistoryserver':
+      historyserver_no_op_test = format(
+        'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
+      try:
+        Execute(format('{spark_history_server_start}'),
+                user=params.spark_user,
+                environment={'JAVA_HOME': params.java_home},
+                not_if=historyserver_no_op_test)
+      except:
+        show_logs(params.spark_log_dir, user=params.spark_user)
+        raise
+
+    elif name == 'sparkthriftserver':
+      if params.security_enabled:
+        hive_principal = params.hive_kerberos_principal
+        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+        Execute(hive_kinit_cmd, user=params.hive_user)
+
+      thriftserver_no_op_test = format(
+        'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
+      try:
+        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
+                user=params.hive_user,
+                environment={'JAVA_HOME': params.java_home},
+                not_if=thriftserver_no_op_test
+        )
+      except:
+        show_logs(params.spark_log_dir, user=params.hive_user)
+        raise
+  elif action == 'stop':
+    if name == 'jobhistoryserver':
+      try:
+        Execute(format('{spark_history_server_stop}'),
+                user=params.spark_user,
+                environment={'JAVA_HOME': params.java_home}
+        )
+      except:
+        show_logs(params.spark_log_dir, user=params.spark_user)
+        raise
+      File(params.spark_history_server_pid_file,
+        action="delete"
+      )
+
+    elif name == 'sparkthriftserver':
+      try:
+        Execute(format('{spark_thrift_server_stop}'),
+                user=params.hive_user,
+                environment={'JAVA_HOME': params.java_home}
+        )
+      except:
+        show_logs(params.spark_log_dir, user=params.hive_user)
+        raise
+      File(params.spark_thrift_server_pid_file,
+        action="delete"
+      )
+
+
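Both start branches above guard Execute with a not_if shell test, so a start request becomes a no-op when the pid file already points at a live process; the test itself is plain shell (pid path illustrative):

    PID_FILE=/var/run/spark2/spark-history-server.pid   # illustrative path
    ls "$PID_FILE" >/dev/null 2>&1 && ps -p "`cat $PID_FILE`" >/dev/null 2>&1

Execute runs the start script only when this pipeline exits non-zero, i.e. when the pid file is missing or its process is gone.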
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_thrift_server.py
new file mode 100644
index 0000000..8953b35
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/spark_thrift_server.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import setup_spark
+from spark_service import spark_service
+
+
+class SparkThriftServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    setup_spark(env, 'server', upgrade_type = upgrade_type, action = 'config')
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.spark_thrift_server_pid_file)
+
+  def get_component_name(self):
+    # TODO, change to "spark" after RPM switches the name
+    return "spark2-thriftserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
+      # TODO, change to "spark" after RPM switches the name
+      conf_select.select(params.stack_name, "spark2", params.version)
+      stack_select.select("spark2-thriftserver", params.version)
+
+  def get_log_folder(self):
+    import params
+    return params.spark_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hive_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.spark_thrift_server_pid_file]
+
+if __name__ == "__main__":
+  SparkThriftServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/status_params.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/status_params.py
similarity index 100%
rename from ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/status_params.py
rename to ambari-server/src/main/resources/common-services/SPARK/2.2.0/package/scripts/status_params.py
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
deleted file mode 100644
index 16a2224..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/job_history_server.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import *
-from spark_service import spark_service
-
-
-class JobHistoryServer(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    
-    self.install_packages(env)
-    
-  def configure(self, env, upgrade_type=None, config_dir=None):
-    import params
-    env.set_params(params)
-    
-    setup_spark(env, 'server', upgrade_type=upgrade_type, action = 'config')
-    
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    self.configure(env)
-    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    
-    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    check_process_status(status_params.spark_history_server_pid_file)
-    
-
-  def get_component_name(self):
-    return "spark-historyserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-historyserver", params.version)
-
-      # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
-      # need to copy the tarball, otherwise, copy it.
-      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
-        resource_created = copy_to_hdfs(
-          "tez",
-          params.user_group,
-          params.hdfs_user,
-          skip=params.sysprep_skip_copy_tarballs_hdfs)
-        if resource_created:
-          params.HdfsResource(None, action="execute")
-          
-  def get_log_folder(self):
-    import params
-    return params.spark_log_dir
-  
-  def get_user(self):
-    import params
-    return params.spark_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.spark_history_server_pid_file]
-
-if __name__ == "__main__":
-  JobHistoryServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
deleted file mode 100644
index b09d9a9..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/livy_server.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
-from resource_management import is_empty
-from resource_management import shell
-from resource_management.libraries.functions.decorator import retry
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import conf_select, stack_select
-
-from livy_service import livy_service
-from setup_livy import setup_livy
-
-class LivyServer(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None, config_dir=None):
-    import params
-    env.set_params(params)
-
-    setup_livy(env, 'server', upgrade_type=upgrade_type, action = 'config')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.has_ats and params.has_livyserver:
-      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
-      self.wait_for_dfs_directories_created([params.entity_groupfs_store_dir, params.entity_groupfs_active_dir])
-
-    self.configure(env)
-    livy_service('server', upgrade_type=upgrade_type, action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    livy_service('server', upgrade_type=upgrade_type, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    check_process_status(status_params.livy_server_pid_file)
-
-  #  TODO move out and compose with similar method in resourcemanager.py
-  def wait_for_dfs_directories_created(self, dirs):
-    import params
-
-    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {livy_kerberos_keytab} {livy_principal}"),
-              user=params.livy_user
-              )
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-              user=params.hdfs_user
-              )
-
-    for dir_path in dirs:
-        self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.livy_server_pid_file]
-
-
-  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
-  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
-    import params
-
-    if not is_empty(dir_path):
-      dir_path = HdfsResourceProvider.parse_path(dir_path)
-
-      if dir_path in ignored_dfs_dirs:
-        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
-        return
-
-      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
-
-      dir_exists = None
-
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # checking via webhdfs is much faster than executing hdfs dfs -test
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
-        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        dir_exists = ('FileStatus' in list_status)
-      else:
-        # fall back to the more expensive hdfs dfs -test -d check.
-        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.livy_user)[0]
-        dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
-
-      if not dir_exists:
-        raise Fail("DFS directory '" + dir_path + "' does not exist!")
-      else:
-        Logger.info("DFS directory '" + dir_path + "' exists.")
-
-  def get_component_name(self):
-    return "livy-server"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      Logger.info("Executing Livy Server Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("livy-server", params.version)
-
-  def get_log_folder(self):
-    import params
-    return params.livy_log_dir
-
-  def get_user(self):
-    import params
-    return params.livy_user
-if __name__ == "__main__":
-    LivyServer().execute()
-
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
deleted file mode 100644
index ab58cb6..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import socket
-
-import status_params
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.script.script import Script
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
-  'SPARK_CLIENT' : 'spark-client',
-  'SPARK_THRIFTSERVER' : 'spark-thriftserver',
-  'LIVY_SERVER' : 'livy-server',
-  'LIVY_CLIENT' : 'livy-client'
-
-}
-
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
-version = default("/commandParams/version", None)
-
-spark_conf = '/etc/spark/conf'
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-
-if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
-  hadoop_home = stack_select.get_hadoop_dir("home")
-  spark_conf = format("{stack_root}/current/{component_directory}/conf")
-  spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
-  spark_pid_dir = status_params.spark_pid_dir
-  spark_home = format("{stack_root}/current/{component_directory}")
-
-spark_daemon_memory = config['configurations']['spark-env']['spark_daemon_memory']
-spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
-java_home = config['hostLevelParams']['java_home']
-
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-user_group = config['configurations']['cluster-env']['user_group']
-
-spark_user = status_params.spark_user
-hive_user = status_params.hive_user
-spark_group = status_params.spark_group
-user_group = status_params.user_group
-spark_hdfs_user_dir = format("/user/{spark_user}")
-spark_history_dir = default('/configurations/spark-defaults/spark.history.fs.logDirectory', "hdfs:///spark-history")
-
-spark_history_server_pid_file = status_params.spark_history_server_pid_file
-spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
-
-spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
-spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
-
-spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
-spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
-spark_hadoop_lib_native = format("{stack_root}/current/hadoop-client/lib/native:{stack_root}/current/hadoop-client/lib/native/Linux-amd64-64")
-
-run_example_cmd = format("{spark_home}/bin/run-example")
-spark_smoke_example = "SparkPi"
-spark_service_check_cmd = format(
-  "{run_example_cmd} --master yarn --deploy-mode cluster --num-executors 1 --driver-memory 256m --executor-memory 256m --executor-cores 1 {spark_smoke_example} 1")
-
-spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
-
-if len(spark_jobhistoryserver_hosts) > 0:
-  spark_history_server_host = spark_jobhistoryserver_hosts[0]
-else:
-  spark_history_server_host = "localhost"
-
-# spark-defaults params
-ui_ssl_enabled = default("configurations/spark-defaults/spark.ssl.enabled", False)
-
-spark_yarn_historyServer_address = default(spark_history_server_host, "localhost")
-spark_history_scheme = "http"
-spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']
-
-if ui_ssl_enabled:
-  spark_history_ui_port = str(int(spark_history_ui_port) + 400)
-  spark_history_scheme = "https"
-
-
-spark_env_sh = config['configurations']['spark-env']['content']
-spark_log4j_properties = config['configurations']['spark-log4j-properties']['content']
-spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
-
-hive_server_host = default("/clusterHostInfo/hive_server_host", [])
-is_hive_installed = not len(hive_server_host) == 0
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-spark_kerberos_keytab =  config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
-spark_kerberos_principal =  config['configurations']['spark-defaults']['spark.history.kerberos.principal']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-
-spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
-has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0
-
-# hive-site params
-spark_hive_properties = {
-  'hive.metastore.uris': config['configurations']['hive-site']['hive.metastore.uris']
-}
-
-# security settings
-if security_enabled:
-  spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
-
-  if is_hive_installed:
-    spark_hive_properties.update({
-      'hive.metastore.sasl.enabled': str(config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower(),
-      'hive.metastore.kerberos.keytab.file': config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'],
-      'hive.server2.authentication.spnego.principal': config['configurations']['hive-site']['hive.server2.authentication.spnego.principal'],
-      'hive.server2.authentication.spnego.keytab': config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab'],
-      'hive.metastore.kerberos.principal': config['configurations']['hive-site']['hive.metastore.kerberos.principal'],
-      'hive.server2.authentication.kerberos.principal': config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'],
-      'hive.server2.authentication.kerberos.keytab': config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab'],
-      'hive.server2.authentication': config['configurations']['hive-site']['hive.server2.authentication'],
-    })
-
-    hive_kerberos_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
-    hive_kerberos_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
-
-# thrift server support - available on HDP 2.3 or higher
-spark_thrift_sparkconf = None
-spark_thrift_cmd_opts_properties = ''
-spark_thrift_fairscheduler_content = None
-spark_thrift_master = "yarn-client"
-if 'nm_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['nm_hosts']) == 1:
-  # use local mode when there's only one nodemanager
-  spark_thrift_master = "local[4]"
-
-if has_spark_thriftserver and 'spark-thrift-sparkconf' in config['configurations']:
-  spark_thrift_sparkconf = config['configurations']['spark-thrift-sparkconf']
-  spark_thrift_cmd_opts_properties = config['configurations']['spark-env']['spark_thrift_cmd_opts']
-  if is_hive_installed:
-    # update default metastore client properties (async wait for the metastore component); this is useful for
-    # blueprint provisioning when hive-metastore and spark-thriftserver are not on the same host.
-    spark_hive_properties.update({
-      'hive.metastore.client.socket.timeout' : config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
-    })
-    spark_hive_properties.update(config['configurations']['spark-hive-site-override'])
-
-  if 'spark-thrift-fairscheduler' in config['configurations'] and 'fairscheduler_content' in config['configurations']['spark-thrift-fairscheduler']:
-    spark_thrift_fairscheduler_content = config['configurations']['spark-thrift-fairscheduler']['fairscheduler_content']
-
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-hdfs_site = config['configurations']['hdfs-site']
-hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = len(ats_host) > 0
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-# livy related config
-
-# livy for spark is only supported from HDP 2.6
-has_livyserver = False
-
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted):
-  livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
-  livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
-  livy_log_dir = config['configurations']['livy-env']['livy_log_dir']
-  livy_pid_dir = status_params.livy_pid_dir
-  livy_home = format("{stack_root}/current/{livy_component_directory}")
-  livy_user = status_params.livy_user
-  livy_group = status_params.livy_group
-  user_group = status_params.user_group
-  livy_hdfs_user_dir = format("/user/{livy_user}")
-  livy_server_pid_file = status_params.livy_server_pid_file
-  livy_recovery_dir = default("/configurations/livy-conf/livy.server.recovery.state-store.url", "/livy-recovery")
-
-  livy_server_start = format("{livy_home}/bin/livy-server start")
-  livy_server_stop = format("{livy_home}/bin/livy-server stop")
-  livy_logs_dir = format("{livy_home}/logs")
-
-  livy_env_sh = config['configurations']['livy-env']['content']
-  livy_log4j_properties = config['configurations']['livy-log4j-properties']['content']
-  livy_spark_blacklist_properties = config['configurations']['livy-spark-blacklist']['content']
-
-  if 'livy.server.kerberos.keytab' in config['configurations']['livy-conf']:
-    livy_kerberos_keytab =  config['configurations']['livy-conf']['livy.server.kerberos.keytab']
-  else:
-    livy_kerberos_keytab =  config['configurations']['livy-conf']['livy.server.launch.kerberos.keytab']
-  if 'livy.server.kerberos.principal' in config['configurations']['livy-conf']:
-    livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.kerberos.principal']
-  else:
-    livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.launch.kerberos.principal']
-
-  livy_livyserver_hosts = default("/clusterHostInfo/livy_server_hosts", [])
-
-  # ats 1.5 properties
-  entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
-  entity_groupfs_active_dir_mode = 01777
-  entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
-  entity_groupfs_store_dir_mode = 0700
-  is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
-
-  if len(livy_livyserver_hosts) > 0:
-    has_livyserver = True
-    if security_enabled:
-      livy_principal = livy_kerberos_principal.replace('_HOST', config['hostname'].lower())
-
-  livy_livyserver_port = default('configurations/livy-conf/livy.server.port',8999)
-
-
-import functools
-# create a partial function with common arguments pre-bound for every HdfsResource call;
-# to create/delete an HDFS directory/file or copy from local, code calls params.HdfsResource
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
-
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
deleted file mode 100644
index a2e53cd..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_client.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import setup_spark
-
-
-class SparkClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, upgrade_type=None, config_dir=None):
-    import params
-    env.set_params(params)
-    
-    setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-  
-  def get_component_name(self):
-    return "spark-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
-      conf_select.select(params.stack_name, "spark", params.version)
-      stack_select.select("spark-client", params.version)
-
-if __name__ == "__main__":
-  SparkClient().execute()
-
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
deleted file mode 100644
index c6619e4..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_service.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import socket
-import tarfile
-import os
-from contextlib import closing
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs, get_tarball_paths
-from resource_management.libraries.functions import format
-from resource_management.core.resources.system import File, Execute
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.show_logs import show_logs
-
-
-def make_tarfile(output_filename, source_dir):
-  try:
-    os.remove(output_filename)
-  except OSError:
-    pass
-  parent_dir=os.path.dirname(output_filename)
-  if not os.path.exists(parent_dir):
-    os.makedirs(parent_dir)
-    os.chmod(parent_dir, 0711)
-  with closing(tarfile.open(output_filename, "w:gz")) as tar:
-    for file in os.listdir(source_dir):
-      tar.add(os.path.join(source_dir,file),arcname=file)
-  os.chmod(output_filename, 0644)
-
-
-def spark_service(name, upgrade_type=None, action=None):
-  import params
-
-  if action == 'start':
-
-    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
-    if effective_version:
-      effective_version = format_stack_version(effective_version)
-
-    if name == 'jobhistoryserver' and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
-      # create & copy spark-hdp-yarn-archive.tar.gz to hdfs
-      if not params.sysprep_skip_copy_tarballs_hdfs:
-          source_dir=params.spark_home+"/jars"
-          tmp_archive_file=get_tarball_paths("spark")[1]
-          make_tarfile(tmp_archive_file, source_dir)
-          copy_to_hdfs("spark", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs, replace_existing_files=True)
-      # create spark history directory
-      params.HdfsResource(params.spark_history_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.spark_user,
-                          group=params.user_group,
-                          mode=0777,
-                          recursive_chmod=True
-                          )
-      params.HdfsResource(None, action="execute")
-
-    if params.security_enabled:
-      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
-      Execute(spark_kinit_cmd, user=params.spark_user)
-
-    # Spark 1.3.1.2.3 and higher (first shipped in HDP 2.3) has no dependency on Tez and does not
-    # need the Tez tarball copied; for older versions, copy it.
-    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
-      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
-      if resource_created:
-        params.HdfsResource(None, action="execute")
-
-    if name == 'jobhistoryserver':
-      historyserver_no_op_test = format(
-      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
-      try:
-        Execute(format('{spark_history_server_start}'),
-                user=params.spark_user,
-                environment={'JAVA_HOME': params.java_home},
-                not_if=historyserver_no_op_test)
-      except:
-        show_logs(params.spark_log_dir, user=params.spark_user)
-        raise
-
-    elif name == 'sparkthriftserver':
-      if params.security_enabled:
-        hive_principal = params.hive_kerberos_principal
-        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
-        Execute(hive_kinit_cmd, user=params.hive_user)
-
-      thriftserver_no_op_test = format(
-      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
-      try:
-        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
-                user=params.hive_user,
-                environment={'JAVA_HOME': params.java_home},
-                not_if=thriftserver_no_op_test
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.hive_user)
-        raise
-  elif action == 'stop':
-    if name == 'jobhistoryserver':
-      try:
-        Execute(format('{spark_history_server_stop}'),
-                user=params.spark_user,
-                environment={'JAVA_HOME': params.java_home}
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.spark_user)
-        raise
-      File(params.spark_history_server_pid_file,
-        action="delete"
-      )
-
-    elif name == 'sparkthriftserver':
-      try:
-        Execute(format('{spark_thrift_server_stop}'),
-                user=params.hive_user,
-                environment={'JAVA_HOME': params.java_home}
-        )
-      except:
-        show_logs(params.spark_log_dir, user=params.hive_user)
-        raise
-      File(params.spark_thrift_server_pid_file,
-        action="delete"
-      )
-
-
diff --git a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
deleted file mode 100644
index de82c16..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/spark_thrift_server.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from setup_spark import setup_spark
-from spark_service import spark_service
-
-
-class SparkThriftServer(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None, config_dir=None):
-    import params
-    env.set_params(params)
-    setup_spark(env, 'server', upgrade_type = upgrade_type, action = 'config')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.spark_thrift_server_pid_file)
-
-  def get_component_name(self):
-    return "spark-thriftserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
-    conf_select.select(params.stack_name, "spark", params.version)
-    stack_select.select("spark-thriftserver", params.version)
-      
-  def get_log_folder(self):
-    import params
-    return params.spark_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hive_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.spark_thrift_server_pid_file]
-
-if __name__ == "__main__":
-  SparkThriftServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/alerts.json b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/alerts.json
index dc9d023..5d163ac 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/alerts.json
@@ -27,6 +27,54 @@
           }
         }
       }
+    ],
+    "LIVY2_SERVER": [
+      {
+        "name": "livy2_server_status",
+        "label": "Spark2 Livy Server",
+        "description": "This host-level alert is triggered if the Livy2 Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "SCRIPT",
+          "path": "SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "SPARK2_THRIFTSERVER": [
+      {
+        "name": "spark2_thriftserver_status",
+        "label": "Spark2 Thrift Server",
+        "description": "This host-level alert is triggered if the Spark2 Thrift Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "SCRIPT",
+          "path": "SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
     ]
   }
 }
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/configuration/spark2-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/configuration/spark2-logsearch-conf.xml
deleted file mode 100644
index cb71c6b..0000000
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/configuration/spark2-logsearch-conf.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Spark2</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>SPARK2_JOBHISTORYSERVER:spark2_jobhistory_server;SPARK2_THRIFTSERVER:spark2_thriftserver;LIVY2_SERVER:livy2_server</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-   "input":[
-      {
-       "type":"spark2_jobhistory_server",
-       "rowtype":"service",
-       "path":"{{default('/configurations/spark2-env/spark_log_dir', '/var/log/spark2')}}/spark-*-org.apache.spark.deploy.history.HistoryServer*.out"
-     },
-     {
-       "type":"spark2_thriftserver",
-       "rowtype":"service",
-       "path":"{{default('/configurations/spark2-env/spark_log_dir', '/var/log/spark2')}}/spark-*-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2*.out"
-     },
-     {
-       "type":"livy2_server",
-       "rowtype":"service",
-       "path":"{{default('/configurations/livy2-env/livy2_log_dir', '/var/log/livy2')}}/livy-livy-server.out"
-     }
-   ],
-   "filter":[
-      {
-          "filter":"grok",
-          "conditions":{
-            "fields":{
-              "type":[
-                "spark2_jobhistory_server",
-                "spark2_thriftserver",
-                "livy2_server"
-              ]
-             }
-          },
-          "log4j_format":"",
-          "multiline_pattern":"^(%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level})",
-          "message_pattern":"(?m)^%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVAFILE:file}:%{SPACE}%{GREEDYDATA:log_message}",
-          "post_map_values":{
-            "logtime":{
-              "map_date":{
-                "target_date_pattern":"yy/MM/dd HH:mm:ss"
-              }
-             },
-            "level":{
-              "map_fieldvalue":{
-                "pre_value":"WARNING",
-                "post_value":"WARN"
-              }
-             }
-           }
-      }
-   ]
-}
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
new file mode 100644
index 0000000..525ac3c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
@@ -0,0 +1,148 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import logging
+import traceback
+import socket
+from resource_management import *
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources import Execute
+from resource_management.core.logger import Logger
+from resource_management.core import global_lock
+from resource_management.libraries.functions import get_kinit_path
+
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+logger = logging.getLogger('ambari_alerts')
+
+LIVY_SERVER_PORT_KEY = '{{livy2-conf/livy.server.port}}'
+
+LIVYUSER_DEFAULT = 'livy'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+LIVY_SSL_ENABLED_KEY = '{{livy2-conf/livy.keystore}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+    """
+    Returns a tuple of tokens in the format {{site/property}} that will be used
+    to build the dictionary passed into execute
+    """
+    # LIVYUSER_DEFAULT is a plain user name, not a {{site/property}} token, so it is not returned here
+    return (LIVY_SERVER_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, SMOKEUSER_KEY, LIVY_SSL_ENABLED_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+    """
+    Returns a tuple containing the result code and a pre-formatted result label
+
+    Keyword arguments:
+    configurations (dictionary): a mapping of configuration key to value
+    parameters (dictionary): a mapping of script parameter key to value
+    host_name (string): the name of this host where the alert is running
+    """
+
+    if configurations is None:
+        return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
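+    # resolve the Livy port: use livy.server.port when configured, otherwise the 8999 default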
+    LIVY_PORT_DEFAULT = 8999
+
+    port = LIVY_PORT_DEFAULT
+    if LIVY_SERVER_PORT_KEY in configurations:
+        port = int(configurations[LIVY_SERVER_PORT_KEY])
+
+    if host_name is None:
+        host_name = socket.getfqdn()
+
+    # default to the 'livy' user when the smoke user is not supplied in configurations
+    livyuser = configurations[SMOKEUSER_KEY] if SMOKEUSER_KEY in configurations else LIVYUSER_DEFAULT
+
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    smokeuser_kerberos_keytab = None
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+        smokeuser_kerberos_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    smokeuser_principal = None
+    if SMOKEUSER_PRINCIPAL_KEY in configurations:
+        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+        smokeuser_principal = smokeuser_principal.replace('_HOST',host_name.lower())
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+        kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
+    if security_enabled:
+        kinitcmd = format("{kinit_path_local} -kt {smokeuser_kerberos_keytab} {smokeuser_principal}; ")
+        # prevent concurrent kinit
+        kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+        kinit_lock.acquire()
+        try:
+            Execute(kinitcmd, user=livyuser)
+        finally:
+            kinit_lock.release()
+
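+    # the livy.keystore token only appears in configurations when SSL is set up, so it doubles as an SSL flag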
+    http_scheme = 'https' if LIVY_SSL_ENABLED_KEY in configurations else 'http'
+    result_code = None
+    try:
+        start_time = time.time()
+        try:
+            livy2_livyserver_host = str(host_name)
+
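+            # probe the Livy REST API; an HTTP 200 from /sessions means the server is up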
+            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {http_scheme}://{livy2_livyserver_host}:{port}/sessions | grep 200 ")
+
+            Execute(livy_cmd,
+                    tries=3,
+                    try_sleep=1,
+                    logoutput=True,
+                    user=livyuser
+                    )
+
+            total_time = time.time() - start_time
+            result_code = 'OK'
+            label = OK_MESSAGE.format(total_time, port)
+        except:
+            result_code = 'CRITICAL'
+            label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+    except:
+        label = traceback.format_exc()
+        result_code = 'UNKNOWN'
+
+    return (result_code, [label])
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
new file mode 100644
index 0000000..12d2cda
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
@@ -0,0 +1,152 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from resource_management.core import global_lock
+
+
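+# resolve <stack-root> once at module load; the Spark2 client is expected under <stack-root>/current/spark2-client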
+stack_root = Script.get_stack_root()
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{spark2-hive-site-override/hive.server2.thrift.port}}'
+HIVE_SERVER_TRANSPORT_MODE_KEY = '{{spark2-hive-site-override/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER2_KERBEROS_KEYTAB = '{{hive-site/hive.server2.authentication.kerberos.keytab}}'
+HIVE_SERVER2_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10016
+HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
+
+HIVEUSER_DEFAULT = 'hive'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+    """
+    Returns a tuple of tokens in the format {{site/property}} that will be used
+    to build the dictionary passed into execute
+    """
+    # HIVEUSER_DEFAULT is a plain user name, not a {{site/property}} token, so it is not returned here
+    return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_TRANSPORT_MODE_KEY, SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+            HIVE_SERVER2_KERBEROS_KEYTAB, HIVE_SERVER2_PRINCIPAL_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+    """
+    Returns a tuple containing the result code and a pre-formatted result label
+
+    Keyword arguments:
+    configurations (dictionary): a mapping of configuration key to value
+    parameters (dictionary): a mapping of script parameter key to value
+    host_name (string): the name of this host where the alert is running
+    """
+
+    spark_home = os.path.join(stack_root, "current", 'spark2-client')
+
+    if configurations is None:
+        return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+    transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+    if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+        transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
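+    # hive.server2.thrift.port only applies to binary transport; in http mode the default port is used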
+    port = THRIFT_PORT_DEFAULT
+    if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+        port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    hive_kerberos_keytab = None
+    if HIVE_SERVER2_KERBEROS_KEYTAB in configurations:
+        hive_kerberos_keytab = configurations[HIVE_SERVER2_KERBEROS_KEYTAB]
+
+    if host_name is None:
+        host_name = socket.getfqdn()
+
+    hive_principal = None
+    if HIVE_SERVER2_PRINCIPAL_KEY in configurations:
+        hive_principal = configurations[HIVE_SERVER2_PRINCIPAL_KEY]
+        hive_principal = hive_principal.replace('_HOST',host_name.lower())
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+        kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
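+    # the check runs as the default 'hive' user; there is no dedicated config token for the hive user here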
+    hiveruser = HIVEUSER_DEFAULT
+
+    if security_enabled:
+        kinitcmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+        # prevent concurrent kinit
+        kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+        kinit_lock.acquire()
+        try:
+            Execute(kinitcmd, user=hiveruser)
+        finally:
+            kinit_lock.release()
+
+    # honor the configurable check.command.timeout parameter, falling back to the default
+    check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+    if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+        check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+    result_code = None
+    try:
+
+        # build the JDBC url, appending the transport mode; url parts are joined with ';' so
+        # transportMode becomes part of the url, and the url is quoted for the shell
+        beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
+
+        beeline_cmd = os.path.join(spark_home, "bin", "beeline")
+        # the check fails when beeline reports 'Connection refused' or 'Invalid URL'
+        cmd = "! %s -u '%s'  -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL'" % \
+              (beeline_cmd, format(";".join(beeline_url)))
+
+        start_time = time.time()
+        try:
+            Execute(cmd, user=hiveruser, path=[beeline_cmd], timeout=check_command_timeout)
+            total_time = time.time() - start_time
+            result_code = 'OK'
+            label = OK_MESSAGE.format(total_time, port)
+        except:
+            result_code = 'CRITICAL'
+            label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+    except:
+        label = traceback.format_exc()
+        result_code = 'UNKNOWN'
+
+    return (result_code, [label])
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
index 4ed0718..74790ef 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
@@ -198,7 +198,7 @@
 # livy for spark2 is only supported from HDP 2.6
 has_livyserver = False
 
-if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted):
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and "livy2-env" in config['configurations']:
   livy2_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY2_SERVER")
   livy2_conf = format("{stack_root}/current/{livy2_component_directory}/conf")
   livy2_log_dir = config['configurations']['livy2-env']['livy2_log_dir']
@@ -229,6 +229,7 @@
     livy_kerberos_principal = config['configurations']['livy2-conf']['livy.server.launch.kerberos.principal']
 
   livy2_livyserver_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
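+  # livy.keystore is only defined in livy2-conf when SSL is enabled, so its presence selects https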
+  livy2_http_scheme = 'https' if 'livy.keystore' in config['configurations']['livy2-conf'] else 'http'
 
   # ats 1.5 properties
   entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
index 8e7a766..7667191 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
@@ -44,7 +44,7 @@
       live_livyserver_host = ""
       for livyserver_host in params.livy2_livyserver_hosts:
         try:
-          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livyserver_host}:{livy2_livyserver_port}/sessions | grep 200"),
+          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {livy2_http_scheme}://{livyserver_host}:{livy2_livyserver_port}/sessions | grep 200"),
                   tries=3,
                   try_sleep=1,
                   logoutput=True,
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/templates/input.config-spark2.json.j2 b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/templates/input.config-spark2.json.j2
new file mode 100644
index 0000000..e90aa65
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/templates/input.config-spark2.json.j2
@@ -0,0 +1,66 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"spark2_jobhistory_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark2-env/spark_log_dir', '/var/log/spark2')}}/spark-*-org.apache.spark.deploy.history.HistoryServer*.out"
+    },
+    {
+      "type":"spark2_thriftserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark2-env/spark_log_dir', '/var/log/spark2')}}/spark-*-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2*.out"
+    },
+    {
+      "type":"livy2_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/livy2-env/livy2_log_dir', '/var/log/livy2')}}/livy-livy-server.out"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "spark2_jobhistory_server",
+            "spark2_thriftserver",
+            "livy2_server"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level})",
+      "message_pattern":"(?m)^%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVAFILE:file}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yy/MM/dd HH:mm:ss"
+          }
+        },
+        "level":{
+          "map_fieldvalue":{
+            "pre_value":"WARNING",
+            "post_value":"WARN"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/quicklinks/quicklinks.json
index ff285fc..9cd07f1 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/quicklinks/quicklinks.json
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/quicklinks/quicklinks.json
@@ -11,6 +11,7 @@
       {
         "name": "spark2_history_server_ui",
         "label": "Spark2 History Server UI",
+        "component_name": "SPARK2_JOBHISTORYSERVER",
         "requires_user_name": "false",
         "url": "%@://%@:%@",
         "port":{
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml
new file mode 100644
index 0000000..1364776
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!--
+  These two security properties are only read if the cluster is Kerberized, but it is safe to add them even without Kerberos.
+  It is important to have at least one property in this config file so that it gets added while merging configs during EU/RU
+  from an earlier stack to HDP 2.5+.
+  It also allows a fresh install with Sqoop to expose this config type in the UI.
+  -->
+  <property>
+    <name>atlas.jaas.KafkaClient.option.useTicketCache</name>
+    <value>true</value>
+    <description>
+      Set this to "true" if you want the TGT to be obtained from the ticket cache.
+      Set this option to "false" if you do not want this module to use the ticket cache.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.jaas.KafkaClient.option.renewTicket</name>
+    <value>true</value>
+    <description>
+      Set this to "true" if you want the TGT to renew the ticket when it expires.
+      Set this option to "false" if you do not want this module to renew tickets.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
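
On the consuming side, Atlas assembles `atlas.jaas.<client>.option.*` properties into an in-memory JAAS configuration for its Kafka client. A rough sketch of what these two options could produce, assuming the usual `Krb5LoginModule` and ignoring the other `atlas.jaas.*` keys (loginModuleName, loginModuleControlFlag, and so on) that Atlas also reads:

```python
# Hypothetical rendering; the module name and layout are assumptions, not taken
# from this patch.
props = {
    "atlas.jaas.KafkaClient.option.useTicketCache": "true",
    "atlas.jaas.KafkaClient.option.renewTicket": "true",
}

# Collect the option.* suffixes for the KafkaClient section
options = {k.rsplit(".", 1)[-1]: v for k, v in props.items()
           if k.startswith("atlas.jaas.KafkaClient.option.")}

jaas = "KafkaClient {\n  com.sun.security.auth.module.Krb5LoginModule required\n"
jaas += "".join("    %s=%s\n" % kv for kv in sorted(options.items()))
jaas += "    ;\n};"
print(jaas)
```
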
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml
new file mode 100644
index 0000000..6e16ab3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml
@@ -0,0 +1,87 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- sqoop-env.sh -->
+  <property>
+    <name>sqoop.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable the Atlas hook, which publishes Sqoop job lineage to Atlas</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>sqoop-env template</display-name>
+    <description>This is the jinja template for sqoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+#Set the path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#Set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}
+
+#Set the path to where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2&gt; /dev/null`:${SQOOP_USER_CLASSPATH}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>sqoop_user</name>
+    <display-name>Sqoop User</display-name>
+    <description>User to run Sqoop as</description>
+    <property-type>USER</property-type>
+    <value>sqoop</value>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jdbc_drivers</name>
+    <description>Comma-separated list of additional JDBC driver class names</description>
+    <value> </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
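
The `content` property is rendered through Jinja2 (Ambari's `InlineTemplate`) against the values computed in `package/scripts/params_linux.py`; the shell `${VAR:-default}` syntax passes through untouched, since only `{{...}}` is template markup. A trimmed rendering sketch with hypothetical paths:

```python
from jinja2 import Template  # Ambari's InlineTemplate wraps Jinja2

# Hypothetical values; on a live cluster these come from params_linux.py
params = {
    "hadoop_home": "/usr/hdp/current/hadoop-client",
    "hbase_home": "/usr/hdp/current/hbase-client",
    "hive_home": "/usr/hdp/current/hive-client",
}

template = (
    "export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n"
    "export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n"
    "export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n"
)
print(Template(template).render(**params))
# -> export HADOOP_HOME=${HADOOP_HOME:-/usr/hdp/current/hadoop-client} ...
```
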
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml
new file mode 100644
index 0000000..389550d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- This property was valid in HDP 2.3 and 2.4, but removed in HDP 2.5 -->
+  <property>
+    <name>sqoop.job.data.publish.class</name>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>sqoop-env</type>
+        <name>sqoop.atlas.hook</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json
new file mode 100644
index 0000000..de12e7c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json
@@ -0,0 +1,20 @@
+{
+  "services": [
+    {
+      "name": "SQOOP",
+      "configurations": [
+        {
+          "sqoop-atlas-application.properties": {
+            "atlas.jaas.KafkaClient.option.useTicketCache": "true",
+            "atlas.jaas.KafkaClient.option.renewTicket": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "SQOOP"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml
new file mode 100644
index 0000000..999d93a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <displayName>Sqoop</displayName>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.3.0</version>
+
+      <components>
+        <component>
+          <name>SQOOP</name>
+          <displayName>Sqoop Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>sqoop-site.xml</fileName>
+              <dictionaryName>sqoop-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>sqoop-env.sh</fileName>
+              <dictionaryName>sqoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>mysql-connector-java</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_mysql_connector</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>sqoop_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>sqoop-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+        <config-type>sqoop-env</config-type>
+        <config-type>sqoop-site</config-type>
+        <config-type>application.properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
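
In `<osSpecifics>`, the agent expands the `${stack_version}` token when resolving package names, which is why the RPM families use `sqoop_${stack_version}` while the Debian families use `sqoop-${stack_version}`. A toy illustration; the underscore/dash formatting rule below is an assumption for the sketch, not taken from this patch:

```python
# Illustration only: substitute ${stack_version} into a package-name pattern,
# normalizing separators the way HDP-style package names tend to look.
def expand_package_name(pattern, stack_version, separator):
    token = stack_version.replace(".", separator).replace("-", separator)
    return pattern.replace("${stack_version}", token)

print(expand_package_name("sqoop_${stack_version}", "2.6.5.0-292", "_"))  # sqoop_2_6_5_0_292
print(expand_package_name("sqoop-${stack_version}", "2.6.5.0-292", "-"))  # sqoop-2-6-5-0-292
```
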
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py
new file mode 100644
index 0000000..61573ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..c1138b3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py
@@ -0,0 +1,135 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_kinit_path import get_kinit_path
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SQOOP' : 'sqoop-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+# default hadoop params
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+sqoop_lib = "/usr/lib/sqoop/lib"
+hadoop_home = '/usr/lib/hadoop'
+hbase_home = "/usr/lib/hbase"
+hive_home = "/usr/lib/hive"
+sqoop_bin_dir = "/usr/bin"
+zoo_conf_dir = "/etc/zookeeper"
+
+# For stack versions supporting rolling upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  sqoop_conf_dir = format("{stack_root}/current/sqoop-client/conf")
+  sqoop_lib = format("{stack_root}/current/sqoop-client/lib")
+  hadoop_home = format("{stack_root}/current/hadoop-client")
+  hbase_home = format("{stack_root}/current/hbase-client")
+  hive_home = format("{stack_root}/current/hive-client")
+  sqoop_bin_dir = format("{stack_root}/current/sqoop-client/bin/")
+  zoo_conf_dir = format("{stack_root}/current/zookeeper-client/conf")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+user_group = config['configurations']['cluster-env']['user_group']
+sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
+
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+#JDBC driver jar name
+sqoop_jdbc_drivers_dict = []
+sqoop_jdbc_drivers_name_dict = {}
+sqoop_jdbc_drivers_to_remove = {}
+if "jdbc_drivers" in config['configurations']['sqoop-env']:
+  sqoop_jdbc_drivers = config['configurations']['sqoop-env']['jdbc_drivers'].split(',')
+
+  for driver_name in sqoop_jdbc_drivers:
+    previous_jdbc_jar_name = None
+    driver_name = driver_name.strip()
+    if not driver_name:
+      continue
+    if driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+      jdbc_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      jdbc_driver_name = "mssql"
+    elif driver_name == "com.mysql.jdbc.Driver":
+      jdbc_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      jdbc_driver_name = "mysql"
+    elif driver_name == "org.postgresql.Driver":
+      jdbc_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      jdbc_driver_name = "postgres"
+    elif driver_name == "oracle.jdbc.driver.OracleDriver":
+      jdbc_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      jdbc_driver_name = "oracle"
+    elif driver_name == "org.hsqldb.jdbc.JDBCDriver":
+      jdbc_name = default("/hostLevelParams/custom_hsqldb_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_hsqldb_jdbc_name", None)
+      jdbc_driver_name = "hsqldb"
+    else:
+      # Unknown driver class: skip it so jdbc_name/jdbc_driver_name are never used unset
+      continue
+    sqoop_jdbc_drivers_dict.append(jdbc_name)
+    sqoop_jdbc_drivers_to_remove[jdbc_name] = previous_jdbc_jar_name
+    sqoop_jdbc_drivers_name_dict[jdbc_name] = jdbc_driver_name
+jdk_location = config['hostLevelParams']['jdk_location']
+
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks
+sqoop_atlas_application_properties = default('/configurations/sqoop-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/sqoop-env/sqoop.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion
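
The net effect of the `jdbc_drivers` block is a mapping from a configured driver class to the short database name accepted by `ambari-server setup --jdbc-db`. A standalone sketch of that mapping with a hypothetical config value:

```python
# Driver class -> --jdbc-db name, mirroring the if/elif chain above
driver_to_db = {
    "com.microsoft.sqlserver.jdbc.SQLServerDriver": "mssql",
    "com.mysql.jdbc.Driver": "mysql",
    "org.postgresql.Driver": "postgres",
    "oracle.jdbc.driver.OracleDriver": "oracle",
    "org.hsqldb.jdbc.JDBCDriver": "hsqldb",
}

jdbc_drivers = " com.mysql.jdbc.Driver, org.postgresql.Driver "  # hypothetical sqoop-env value
for name in jdbc_drivers.split(","):
    name = name.strip()
    if name in driver_to_db:
        print("%s -> ambari-server setup --jdbc-db=%s" % (name, driver_to_db[name]))
```
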
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..f930765
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+import os
+
+config = Script.get_config()
+
+sqoop_user = "sqoop"
+
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+sqoop_env_cmd_template = config['configurations']['sqoop-env']['content']
+sqoop_home_dir = os.environ["SQOOP_HOME"]
+sqoop_conf_dir = os.path.join(sqoop_home_dir, "conf")
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..bb503f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+import os
+
+class SqoopServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SqoopServiceCheckDefault(SqoopServiceCheck):
+
+  def get_component_name(self):
+    return "sqoop-server"
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser_principal}"),
+              user = params.smokeuser,
+      )
+    Execute("sqoop version",
+            user = params.smokeuser,
+            path = params.sqoop_bin_dir,
+            logoutput = True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SqoopServiceCheckWindows(SqoopServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+    service = "SQOOP"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()
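
On the default (Linux) path the check reduces to two shell commands: an optional `kinit` followed by `sqoop version`, whose exit code decides the check. A rough standalone equivalent; the keytab path and principal are made-up examples, and the real `Execute` calls run both commands as the smoke user:

```python
import subprocess

security_enabled = True  # from cluster-env in the real check
if security_enabled:
    subprocess.check_call([
        "kinit", "-kt", "/etc/security/keytabs/smokeuser.headless.keytab",
        "ambari-qa@EXAMPLE.COM"])
# Success (exit code 0) of "sqoop version" is the whole service check
subprocess.check_call(["sqoop", "version"])
```
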
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py
new file mode 100644
index 0000000..436402c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py
@@ -0,0 +1,124 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from resource_management.core.source import InlineTemplate, DownloadSource
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_config import get_config
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.system import File, Link, Directory
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def sqoop(type=None):
+  import params
+  File(os.path.join(params.sqoop_conf_dir, "sqoop-env.cmd"),
+       content=InlineTemplate(params.sqoop_env_cmd_template)
+  )
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+
+  jdbc_connector()
+  
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group,
+            create_parents = True
+  )
+
+  configs = {}
+  sqoop_site_config = get_config('sqoop-site')
+  if sqoop_site_config:
+    configs.update(sqoop_site_config)
+
+    XmlConfig("sqoop-site.xml",
+              conf_dir = params.sqoop_conf_dir,
+              configurations = configs,
+              configuration_attributes=params.config['configuration_attributes']['sqoop-site'],
+              owner = params.sqoop_user,
+              group = params.user_group
+              )
+
+  # Generate atlas-application.properties.xml file and symlink the hook jars
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.sqoop_conf_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.SQOOP, params.sqoop_atlas_application_properties, atlas_hook_filepath, params.sqoop_user, params.user_group)
+    setup_atlas_jar_symlinks("sqoop", params.sqoop_lib)
+
+
+  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
+    owner=params.sqoop_user,
+    group = params.user_group,
+    content=InlineTemplate(params.sqoop_env_sh_template)
+  )
+  update_config_permissions(["sqoop-env-template.sh",
+                             "sqoop-site-template.xml",
+                             "sqoop-site.xml"])
+
+def update_config_permissions(names):
+  import params
+  for filename in names:
+    full_filename = os.path.join(params.sqoop_conf_dir, filename)
+    File(full_filename,
+          owner = params.sqoop_user,
+          group = params.user_group,
+          only_if = format("test -e {full_filename}")
+    )
+
+def jdbc_connector():
+  import params
+  from urllib2 import HTTPError
+  from resource_management import Fail
+  for jar_name in params.sqoop_jdbc_drivers_dict:
+    if not jar_name or 'mysql' in jar_name:
+      continue
+    downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
+    custom_connector_to_remove = format("{sqoop_lib}/" + str(params.sqoop_jdbc_drivers_to_remove[jar_name]))
+    jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
+    driver_curl_source = format("{jdk_location}/{jar_name}")
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+    try:
+      if custom_connector_to_remove and os.path.isfile(custom_connector_to_remove):
+        File(custom_connector_to_remove, action='delete')
+
+      File(downloaded_custom_connector,
+           content = DownloadSource(driver_curl_source),
+           mode = 0644,
+      )
+    except HTTPError:
+      error_string = format("Could not download {driver_curl_source}\n\
+                 Please upload the JDBC driver to the server by running:\n\
+                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
+                 on {ambari_server_hostname}")
+      raise Fail(error_string)
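
Stripped of the Ambari resource classes, `jdbc_connector()` is a delete-then-download of a driver jar from the Ambari server's resource URL. A plain-Python sketch of that flow (Python 2, matching the `urllib2` import above):

```python
import os
import urllib2  # these scripts target Python 2, as the import in jdbc_connector() shows

def refresh_driver_jar(lib_dir, jar_name, previous_jar_name, jdk_location):
    """Sketch of jdbc_connector(): drop the stale jar, then fetch the new one."""
    if previous_jar_name:
        old_path = os.path.join(lib_dir, previous_jar_name)
        if os.path.isfile(old_path):
            os.remove(old_path)  # remove the previously uploaded driver first
    url = "%s/%s" % (jdk_location, jar_name)  # Ambari serves drivers from its resources dir
    with open(os.path.join(lib_dir, jar_name), "wb") as out:
        out.write(urllib2.urlopen(url).read())
```
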
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..d420fab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from sqoop import sqoop
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SqoopClientDefault(SqoopClient):
+  def get_component_name(self):
+    return "sqoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "sqoop", params.version)
+      stack_select.select("sqoop-client", params.version)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SqoopClientWindows(SqoopClient):
+  pass
+
+if __name__ == "__main__":
+  SqoopClient().execute()
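
`pre_upgrade_restart` only repoints the conf/stack symlinks when the target version supports rolling upgrade. A sketch of that gate, with a simple version test standing in for `check_stack_feature(StackFeature.ROLLING_UPGRADE, ...)`; the 2.2 threshold is an assumption for illustration:

```python
def supports_rolling_upgrade(version):
    # Stand-in for the stack feature lookup; assumes rolling upgrade from 2.2 on
    return tuple(int(x) for x in version.split(".")[:2]) >= (2, 2)

version = "2.6.5.0"  # hypothetical /commandParams/version during an upgrade restart
if version and supports_rolling_upgrade(version):
    print("would run: conf-select + stack-select for sqoop-client -> %s" % version)
```
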
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json
new file mode 100644
index 0000000..f9a6cb4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json
@@ -0,0 +1,6 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for SQOOP",
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+  }
+}
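
This entry declares that the Sqoop service check may only run after both YARN daemons have started. A toy illustration of how such an entry gates command execution:

```python
# role_command_order entry, as parsed by the server
deps = {"SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]}

completed = {"NODEMANAGER-START"}  # commands that have finished so far
pending = [d for d in deps["SQOOP_SERVICE_CHECK-SERVICE_CHECK"] if d not in completed]
print("service check blocked on: %s" % pending)  # -> ['RESOURCEMANAGER-START']
```
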
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py
new file mode 100644
index 0000000..115ca06
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class SqoopServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps the number of hosts in the cluster to the
+    host index where the component should be placed.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = SqoopRecommender()
+    recommender.recommendSqoopConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = SqoopValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class SqoopRecommender(service_advisor.ServiceAdvisor):
+  """
+  Sqoop Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendSqoopConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+    putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
+
+    enable_external_atlas_for_sqoop = False
+    enable_atlas_hook = False
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'sqoop-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.sqoop' in services['configurations']['sqoop-atlas-application.properties']['properties']:
+      enable_external_atlas_for_sqoop = services['configurations']['sqoop-atlas-application.properties']['properties']['enable.external.atlas.for.sqoop'].lower() == "true"
+
+    if "ATLAS" in servicesList:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    elif enable_external_atlas_for_sqoop:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    else:
+      putSqoopEnvProperty("sqoop.atlas.hook", "false")
+
+    if 'sqoop-env' in configurations and 'sqoop.atlas.hook' in configurations['sqoop-env']['properties']:
+      enable_atlas_hook = configurations['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+    elif 'sqoop-env' in services['configurations'] and 'sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']:
+      enable_atlas_hook = services['configurations']['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
+      putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
+    else:
+      putSqoopSitePropertyAttribute = self.putPropertyAttribute(configurations, "sqoop-site")
+      putSqoopSitePropertyAttribute('sqoop.job.data.publish.class', 'delete', 'true')
+
+
+
+class SqoopValidator(service_advisor.ServiceAdvisor):
+  """
+  Sqoop Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = []
+
+
+
+
+
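
The recommender's core decision is whether `sqoop.atlas.hook` should be on, which in turn drives `sqoop.job.data.publish.class` in sqoop-site. A minimal driver for that logic with hand-built inputs; the dict shapes mirror what Ambari passes, trimmed to the fields used:

```python
services = {
    "services": [
        {"StackServices": {"service_name": "ATLAS"}},
        {"StackServices": {"service_name": "SQOOP"}},
    ],
    "configurations": {},
}

services_list = [s["StackServices"]["service_name"] for s in services["services"]]
enable_external_atlas = False  # would come from sqoop-atlas-application.properties
atlas_hook = "true" if ("ATLAS" in services_list or enable_external_atlas) else "false"
print("sqoop.atlas.hook = %s" % atlas_hook)
# When the hook is on, sqoop-site additionally gets:
#   sqoop.job.data.publish.class = org.apache.atlas.sqoop.hook.SqoopHook
# otherwise the property is flagged for deletion via putPropertyAttribute.
```
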
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
index 4cfe3d5..3d4edad 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
@@ -125,6 +125,17 @@
 
 export STORM_CONF_DIR={{conf_dir}}
 export STORM_HOME={{storm_component_home_dir}}
+
+# Set up storm-autocreds
+# Check whether storm_jaas.conf exists in the config dir; only enable storm-autocreds in secure mode.
+STORM_HOME="$(dirname $(cd "$( dirname "${BASH_SOURCE[0]}" )" &amp;&amp; pwd ))"
+STORM_JAAS_CONF=$STORM_HOME/config/storm_jaas.conf
+STORM_AUTOCREDS_LIB_DIR=/usr/hdp/current/storm-client/external/storm-autocreds
+
+if [ -f $STORM_JAAS_CONF ] &amp;&amp; [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
+    export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
+fi
+
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-logsearch-conf.xml
deleted file mode 100644
index d485017..0000000
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-logsearch-conf.xml
+++ /dev/null
@@ -1,110 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Storm</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>NIMBUS:storm_nimbus;SUPERVISOR:storm_supervisor,storm_worker,storm_logviewer;STORM_UI_SERVER:storm_ui;DRPC_SERVER:storm_drpc</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"storm_drpc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/drpc.log"
-    },
-    {
-      "type":"storm_logviewer",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/logviewer.log"
-    },
-    {
-      "type":"storm_nimbus",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/nimbus.log"
-    },
-    {
-      "type":"storm_supervisor",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/supervisor.log"
-    },
-    {
-      "type":"storm_ui",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/ui.log"
-    },
-    {
-      "type":"storm_worker",
-      "rowtype":"service",
-      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/*worker*.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "storm_drpc",
-            "storm_logviewer",
-            "storm_nimbus",
-            "storm_supervisor",
-            "storm_ui",
-            "storm_worker"
-          ]
-         }
-       },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss.SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
index 178c043..f991e71 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/drpc_server.py
@@ -74,58 +74,6 @@
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.pid_drpc)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-
-      try:
-        props_value_check = None
-        props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-        props_read_check = ['StormServer/keyTab']
-        storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-
-        security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                     {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_jaas' not in security_params
-               or 'StormServer' not in security_params['storm_jaas']
-               or 'keyTab' not in security_params['storm_jaas']['StormServer']
-               or 'principal' not in security_params['storm_jaas']['StormServer']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_jaas']['StormServer']['keyTab'],
-                                security_params['storm_jaas']['StormServer']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
index a974103..360af5d 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py
@@ -82,51 +82,6 @@
     env.set_params(status_params)
     check_process_status(status_params.pid_nimbus)
 
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-      try:
-        props_value_check = None
-        props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-        props_read_check = ['StormServer/keyTab']
-        storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,  props_read_check)
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-        security_params = get_params_from_filesystem(status_params.conf_dir, {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_jaas' not in security_params
-               or 'StormServer' not in security_params['storm_jaas']
-               or 'keyTab' not in security_params['storm_jaas']['StormServer']
-               or 'principal' not in security_params['storm_jaas']['StormServer']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_jaas']['StormServer']['keyTab'],
-                                security_params['storm_jaas']['StormServer']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.log_dir
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
index a56c0cd..fa3112d 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/pacemaker.py
@@ -74,58 +74,6 @@
       env.set_params(status_params)
       check_process_status(status_params.pid_pacemaker)
 
-  def security_status(self, env):
-      import status_params
-
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-          # Expect the following files to be available in status_params.config_dir:
-          #   storm_jaas.conf
-
-          try:
-              props_value_check = None
-              props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-              props_read_check = ['StormServer/keyTab']
-              storm_env_expectations = build_expectations('storm_jaas', props_value_check, props_empty_check,
-                                                          props_read_check)
-
-              storm_expectations = {}
-              storm_expectations.update(storm_env_expectations)
-
-              security_params = get_params_from_filesystem(status_params.conf_dir,
-                                                     {'storm_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-              result_issues = validate_security_config_properties(security_params, storm_expectations)
-              if not result_issues:  # If all validations passed successfully
-                  # Double check the dict before calling execute
-                  if ( 'storm_jaas' not in security_params
-                       or 'StormServer' not in security_params['storm_jaas']
-                       or 'keyTab' not in security_params['storm_jaas']['StormServer']
-                       or 'principal' not in security_params['storm_jaas']['StormServer']):
-                      self.put_structured_out({"securityState": "ERROR"})
-                      self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-                      return
-
-                  cached_kinit_executor(status_params.kinit_path_local,
-                                        status_params.storm_user,
-                                        security_params['storm_jaas']['StormServer']['keyTab'],
-                                        security_params['storm_jaas']['StormServer']['principal'],
-                                        status_params.hostname,
-                                        status_params.tmp_dir)
-                  self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-              else:
-                  issues = []
-                  for cf in result_issues:
-                      issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-                      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-                      self.put_structured_out({"securityState": "UNSECURED"})
-          except Exception as e:
-              self.put_structured_out({"securityState": "ERROR"})
-              self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
       import params
       return params.log_dir
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index 44b256e..78ec165 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -174,13 +174,19 @@
   else:
     storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
 
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 has_metric_collector = not len(ams_collector_hosts) == 0
 metric_collector_port = None
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -202,6 +208,8 @@
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
 metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 
 # Cluster Zookeeper quorum
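
Note on the params_linux.py hunk above: the sink now prefers an externally managed collector (metrics_collector_external_hosts/metrics_collector_external_port from cluster-env) and only then falls back to the cluster host list. A minimal standalone sketch of that selection order; `config` and `cluster_hosts` are illustrative stand-ins for Ambari's parsed command JSON and for default("/clusterHostInfo/metrics_collector_hosts", []), not the resource_management API itself:

    def resolve_collector(config, cluster_hosts):
        cluster_env = config.get('configurations', {}).get('cluster-env', {})
        if 'metrics_collector_external_hosts' in cluster_env:
            # Externally managed AMS: hosts come from cluster-env and metrics
            # must be tagged with an instanceId.
            hosts = cluster_env['metrics_collector_external_hosts']
            set_instance_id = "true"
        else:
            hosts = ",".join(cluster_hosts)
            set_instance_id = "false"

        port = None
        if hosts:
            if 'metrics_collector_external_port' in cluster_env:
                port = cluster_env['metrics_collector_external_port']
            else:
                # Fall back to the port embedded in the AMS webapp address.
                webapp = config.get('configurations', {}).get('ams-site', {}).get(
                    'timeline.metrics.service.webapp.address', '0.0.0.0:6188')
                port = webapp.split(':')[1] if ':' in webapp else '6188'
        return hosts, port, set_instance_id

    print(resolve_collector({'configurations': {}}, ['c6401.ambari.apache.org']))
    # ('c6401.ambari.apache.org', '6188', 'false')
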
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
index 63acecf..e257ef9 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/ui_server.py
@@ -120,59 +120,6 @@
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.pid_ui)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in status_params.config_dir:
-      #   storm_jaas.conf
-
-      try:
-        props_value_check = None
-        props_empty_check = ['storm_ui_principal_name', 'storm_ui_keytab']
-        props_read_check = ['storm_ui_keytab']
-        storm_env_expectations = build_expectations('storm_ui', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        storm_expectations = {}
-        storm_expectations.update(storm_env_expectations)
-
-        security_params = {}
-        security_params['storm_ui'] = {}
-        security_params['storm_ui']['storm_ui_principal_name'] = status_params.storm_ui_principal
-        security_params['storm_ui']['storm_ui_keytab'] = status_params.storm_ui_keytab
-
-        result_issues = validate_security_config_properties(security_params, storm_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'storm_ui' not in security_params
-               or 'storm_ui_principal_name' not in security_params['storm_ui']
-               or 'storm_ui_keytab' not in security_params['storm_ui']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.storm_user,
-                                security_params['storm_ui']['storm_ui_keytab'],
-                                security_params['storm_ui']['storm_ui_principal_name'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
index f2a75cf..b2dd3c8 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
@@ -61,6 +61,8 @@
   protocol: "{{metric_collector_protocol}}"
   port: "{{metric_collector_port}}"
   appId: "{{metric_collector_app_id}}"
+  host_in_memory_aggregation: {{host_in_memory_aggregation}}
+  host_in_memory_aggregation_port: {{host_in_memory_aggregation_port}}
 
   # HTTPS settings
   truststore.path : "{{metric_truststore_path}}"
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/input.config-storm.json.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/input.config-storm.json.j2
new file mode 100644
index 0000000..a2a4841
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/input.config-storm.json.j2
@@ -0,0 +1,78 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"storm_drpc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/drpc.log"
+    },
+    {
+      "type":"storm_logviewer",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/logviewer.log"
+    },
+    {
+      "type":"storm_nimbus",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/nimbus.log"
+    },
+    {
+      "type":"storm_supervisor",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/supervisor.log"
+    },
+    {
+      "type":"storm_ui",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/ui.log"
+    },
+    {
+      "type":"storm_worker",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/*worker*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "storm_drpc",
+            "storm_logviewer",
+            "storm_nimbus",
+            "storm_supervisor",
+            "storm_ui",
+            "storm_worker"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss.SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
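
The message_pattern in the new Log Search input config above is a grok expression. A hand-expanded Python approximation of it run against a typical Storm log line; the named groups mirror the grok captures, but these regexes are narrower than the real TIMESTAMP_ISO8601 / JAVACLASS / LOGLEVEL / GREEDYDATA definitions, so treat this as a sketch:

    import re

    pattern = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}\.\d{3})\s+"
        r"(?P<logger_name>[A-Za-z0-9$_.]+)\s+"
        r"\[(?P<level>[A-Z]+)\]\s+"
        r"(?P<log_message>.*)$",
        re.DOTALL,
    )

    sample = "2017-06-05 12:00:01.123 o.a.s.d.nimbus [INFO] Starting Nimbus with conf ..."
    print(pattern.match(sample).groupdict())
    # {'logtime': '2017-06-05 12:00:01.123', 'logger_name': 'o.a.s.d.nimbus',
    #  'level': 'INFO', 'log_message': 'Starting Nimbus with conf ...'}
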
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
index 001ff73..e7db91e 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
@@ -23,6 +23,8 @@
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
 clusterReporterAppId=nimbus
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm_jaas.conf.j2
index 8116492..c22cb51 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm_jaas.conf.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm_jaas.conf.j2
@@ -33,6 +33,14 @@
    serviceName="{{nimbus_bare_jaas_principal}}"
    principal="{{storm_jaas_principal}}";
 };
+RegistryClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{storm_jaas_principal}}";
+};
 {% endif %}
 Client {
    com.sun.security.auth.module.Krb5LoginModule required
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
new file mode 100644
index 0000000..acd9d85
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/alerts.json
@@ -0,0 +1,145 @@
+{
+  "STORM": {
+    "service": [
+      {
+        "name": "storm_supervisor_process_percent",
+        "label": "Percent Supervisors Available",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "storm_supervisor_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "STORM_UI_SERVER": [
+      {
+        "name": "storm_webui",
+        "label": "Storm Web UI",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{storm-site/ui.port}}",
+            "https" : "{{storm-site/ui.https.port}}",
+            "kerberos_keytab": "{{storm-env/storm_ui_keytab}}",
+            "kerberos_principal": "{{storm-env/storm_ui_principal_name}}",
+            "connection_timeout": 5.0,
+            "https_property": "{{storm-site/ui.https.keystore.type}}",
+            "https_property_value": "jks"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }      
+    ],
+    "NIMBUS": [
+      {
+        "name": "storm_nimbus_process",
+        "label": "Nimbus Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/nimbus.thrift.port}}",
+          "default_port": 6627,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "DRPC_SERVER": [
+      {
+        "name": "storm_drpc_server",
+        "label": "DRPC Server Process",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-site/drpc.port}}",
+          "default_port": 3772,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "SUPERVISOR": [
+      {
+        "name": "storm_supervisor_process",
+        "label": "Supervisor Process",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{storm-env/jmxremote_port}}",
+          "default_port": 56431,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
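
The PORT-type alerts above (Nimbus, DRPC, Supervisor) boil down to a timed TCP connect classified against the warning/critical response-time values from the JSON. A rough standalone illustration of that evaluation, not Ambari's actual alert runner:

    import socket
    import time

    def check_port(host, port, warning=1.5, critical=5.0):
        # Connect, time it, and classify against the alert thresholds.
        start = time.time()
        try:
            socket.create_connection((host, port), timeout=critical).close()
        except OSError as exc:
            return "CRITICAL", "Connection failed: %s to %s:%s" % (exc, host, port)
        elapsed = time.time() - start
        if elapsed >= warning:
            return "WARNING", "TCP OK - %.3fs response on port %s" % (elapsed, port)
        return "OK", "TCP OK - %.3fs response on port %s" % (elapsed, port)

    print(check_port("localhost", 6627))  # nimbus.thrift.port default
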
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
new file mode 100644
index 0000000..18a6c93
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-audit.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/hdfs/spool</value>
+    <description>/var/log/storm/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/storm/audit/solr/spool</value>
+    <description>/var/log/storm/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Capture cluster name from where Ranger storm plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
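
The filespool directories configured above exist so audit events survive while the HDFS or Solr destination is unreachable: the plugin parks events locally and replays them later. A schematic of that write-through-with-spool idea; send_to_destination is a hypothetical stand-in for the HDFS/Solr writer, not Ranger's actual audit provider code:

    import json
    import os

    def audit(event, send_to_destination, spool_dir="/var/log/storm/audit/hdfs/spool"):
        try:
            send_to_destination(event)
        except Exception:
            # Destination unreachable: park the event locally for later replay.
            if not os.path.isdir(spool_dir):
                os.makedirs(spool_dir)
            with open(os.path.join(spool_dir, "spool.json"), "a") as spool:
                spool.write(json.dumps(event) + "\n")
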
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
new file mode 100644
index 0000000..99f6e4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-plugin-properties.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>{{policy_user}}</value>
+    <display-name>Policy user for STORM</display-name>
+    <description>This user must be a system user and must also be present in the Ranger Admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger Admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-storm-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for STORM</display-name>
+    <description>Enable the Ranger Storm plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-storm-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>stormtestuser@EXAMPLE.COM</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>stormtestuser</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
new file mode 100644
index 0000000..cec82b0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-policymgr-ssl.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
new file mode 100644
index 0000000..7b1ed0f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/ranger-storm-security.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.storm.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Storm instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>ranger.plugin.storm.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll Ranger Admin for policy changes, in milliseconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.storm.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+  <property>
+    <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
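
Taken together, policy.rest.url, policy.pollIntervalMs and policy.cache.dir above describe a poll-and-cache loop: fetch policies from Ranger Admin on an interval, persist the last good copy, and fall back to it when Admin is down. A hedged sketch of that loop; the download path mirrors RangerAdminRESTClient's policy endpoint but should be treated as an assumption here, and the requests package is assumed available:

    import json
    import os
    import requests

    def fetch_policies(admin_url, service_name, cache_dir):
        cache_file = os.path.join(cache_dir, service_name + ".json")
        try:
            resp = requests.get(
                "%s/service/plugins/policies/download/%s" % (admin_url, service_name),
                timeout=30)
            resp.raise_for_status()
            policies = resp.json()
            with open(cache_file, "w") as fh:
                json.dump(policies, fh)  # refresh the local cache
            return policies
        except (requests.RequestException, ValueError):
            # Ranger Admin unreachable or returned junk: use the last good copy.
            with open(cache_file) as fh:
                return json.load(fh)
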
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
new file mode 100644
index 0000000..47d7758
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-atlas-application.properties.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+  <!-- These are the Atlas Hooks properties specific to this service. This file is then merged with common properties
+  that apply to all services. -->
+  <property>
+    <name>atlas.hook.storm.numRetries</name>
+    <value>3</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
new file mode 100644
index 0000000..d7f7ae0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-cluster-log4j.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+
+  <property>
+    <name>storm_a1_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of backup file before the log is rotated</description>
+    <display-name>Storm Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_a1_maxbackupindex</name>
+    <value>9</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>storm-cluster-log4j template</display-name>
+    <description>Custom cluster.xml (log4j2 configuration for the Storm cluster daemons)</description>
+    <value><![CDATA[
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration monitorInterval="60">
+<properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+</properties>
+<appenders>
+    <RollingFile name="A1" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="WEB-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log"
+                 filePattern="${sys:storm.log.dir}/access-web-${sys:daemon.name}.log.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <RollingFile name="THRIFT-ACCESS" immediateFlush="false"
+                 fileName="${sys:storm.log.dir}/access-${sys:logfile.name}"
+                 filePattern="${sys:storm.log.dir}/access-${sys:logfile.name}.%i.gz">
+    <PatternLayout>
+        <pattern>${pattern}</pattern>
+    </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <Syslog name="syslog" format="RFC5424" charset="UTF-8" host="localhost" port="514"
+            protocol="UDP" appName="[${sys:daemon.name}]" mdcId="mdc" includeMDC="true"
+            facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
+            messageId="[${sys:user.name}:S0]" id="storm" immediateFlush="true" immediateFail="true"/>
+</appenders>
+<loggers>
+
+    <Logger name="org.apache.storm.logging.filters.AccessLoggingFilter" level="info" additivity="false">
+        <AppenderRef ref="WEB-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <Logger name="org.apache.storm.logging.ThriftAccessLogger" level="info" additivity="false">
+        <AppenderRef ref="THRIFT-ACCESS"/>
+        <AppenderRef ref="syslog"/>
+    </Logger>
+    <root level="info"> <!-- We log everything -->
+        <appender-ref ref="A1"/>
+        <appender-ref ref="syslog"/>
+    </root>
+</loggers>
+</configuration>
+
+    ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
new file mode 100644
index 0000000..3ee0602
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-env.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+    <property>
+        <name>storm_user</name>
+        <display-name>Storm User</display-name>
+        <value>storm</value>
+        <property-type>USER</property-type>
+        <description/>
+        <value-attributes>
+            <type>user</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_log_dir</name>
+        <value>/var/log/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_pid_dir</name>
+        <value>/var/run/storm</value>
+        <description/>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>jmxremote_port</name>
+        <value>56431</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_principal_name</name>
+        <description>Storm principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_keytab</name>
+        <description>Storm keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_principal_name</name>
+        <description>Storm UI principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_ui_keytab</name>
+        <description>Storm UI keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_keytab</name>
+        <description>Nimbus keytab path</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus_principal_name</name>
+        <description>Nimbus principal name</description>
+        <property-type>KERBEROS_PRINCIPAL</property-type>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nofile_limit</name>
+        <value>128000</value>
+        <description>Max open files limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_user_nproc_limit</name>
+        <value>65536</value>
+        <description>Max number of processes limit setting for STORM user.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <!-- storm-env.sh -->
+
+
+    <property>
+        <name>storm.atlas.hook</name>
+        <value>false</value>
+        <display-name>Enable Atlas Hook</display-name>
+        <description>Enable Atlas Hook</description>
+        <value-attributes>
+            <type>boolean</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+        <depends-on>
+            <property>
+                <type>application-properties</type>
+                <name>atlas.rest.address</name>
+            </property>
+        </depends-on>
+    </property>
+    <property>
+        <name>nimbus_seeds_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm_logs_supported</name>
+        <value>true</value>
+        <description/>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <!-- storm-env.sh -->
+    <property>
+        <name>content</name>
+        <display-name>storm-env template</display-name>
+        <description>This is the jinja template for storm-env.sh file</description>
+        <value>
+            #!/bin/bash
+
+            # Set Storm specific environment variables here.
+
+            # The java implementation to use.
+            export JAVA_HOME={{java64_home}}
+
+            export STORM_CONF_DIR={{conf_dir}}
+            export STORM_HOME={{storm_component_home_dir}}
+
+            export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
+        </value>
+        <value-attributes>
+            <type>content</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
new file mode 100644
index 0000000..6b97fb6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
@@ -0,0 +1,1002 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+    <property>
+        <name>storm.local.dir</name>
+        <value>/hadoop/storm</value>
+        <description>A directory on the local filesystem used by Storm for any local
+            filesystem usage it needs. The directory must exist and the Storm daemons must
+            have permission to read/write from this location.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.servers</name>
+        <value>['localhost']</value>
+        <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+        <value-attributes>
+            <type>multiLine</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.port</name>
+        <value>2181</value>
+        <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.root</name>
+        <value>/storm</value>
+        <description>The root location at which Storm stores data in ZooKeeper.</description>
+        <value-attributes>
+            <type>directory</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.session.timeout</name>
+        <value>30000</value>
+        <description>The session timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.connection.timeout</name>
+        <value>30000</value>
+        <description>The connection timeout for clients to ZooKeeper.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.times</name>
+        <value>5</value>
+        <description>The number of times to retry a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.interval</name>
+        <value>1000</value>
+        <description>The interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <unit>ms</unit>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.zookeeper.retry.intervalceiling.millis</name>
+        <value>30000</value>
+        <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.cluster.mode</name>
+        <value>distributed</value>
+        <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.local.mode.zmq</name>
+        <value>false</value>
+        <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
+            to false, then Storm will use a pure-Java messaging system. The purpose
+            of this flag is to make it easy to run Storm in local mode by eliminating
+            the need for native dependencies, which can be difficult to install.
+        </description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+
+
+    <property>
+        <name>nimbus.thrift.port</name>
+        <value>6627</value>
+        <description> Which port the Thrift interface of Nimbus should run on. Clients should
+            connect to this port to upload jars and submit topologies.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.max_buffer_size</name>
+        <value>1048576</value>
+        <description>The maximum buffer size thrift should use when reading messages.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.task.timeout.secs</name>
+        <value>30</value>
+        <description>How long a task can go without heartbeating before Nimbus considers it dead and reassigns it to another location.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.supervisor.timeout.secs</name>
+        <value>60</value>
+        <description>How long a supervisor can go without heartbeating before Nimbus considers it dead and stops assigning new work to it.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.monitor.freq.secs</name>
+        <value>10</value>
+        <description>
+            How often nimbus should wake up to check heartbeats and do reassignments. Note
+            that if a machine ever goes down Nimbus will immediately wake up and take action.
+            This parameter is for checking for failures when there's no explicit event like that occurring.
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.cleanup.inbox.freq.secs</name>
+        <value>600</value>
+        <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.inbox.jar.expiration.secs</name>
+        <value>3600</value>
+        <description>
+            The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
+
+            Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS.
+            Note that the time it takes to delete an inbox jar file is going to be somewhat more than
+            NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on the value of NIMBUS_CLEANUP_FREQ_SECS).
+        </description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.task.launch.secs</name>
+        <value>120</value>
+        <description>A special timeout used when a task is initially launched. During launch, this is the timeout
+            used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.reassign</name>
+        <value>true</value>
+        <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
+            Defaults to true, and it's not recommended to change this value.</description>
+        <value-attributes>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.file.copy.expiration.secs</name>
+        <value>600</value>
+        <description>During upload/download with the master, how long an upload or download connection is idle
+            before nimbus considers it dead and drops the connection.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>ui.port</name>
+        <value>8744</value>
+        <description>Storm UI binds to this port.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.port</name>
+        <value>8000</value>
+        <description>HTTP UI port for log viewer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.appender.name</name>
+        <value>A1</value>
+        <description>Appender name used by log viewer to determine log directory.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.port</name>
+        <value>3772</value>
+        <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.worker.threads</name>
+        <value>64</value>
+        <description>DRPC thrift server worker threads.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.queue.size</name>
+        <value>128</value>
+        <description>DRPC thrift server queue size.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.invocations.port</name>
+        <value>3773</value>
+        <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.request.timeout.secs</name>
+        <value>600</value>
+        <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
+            time out based on the socket timeout on the DRPC client, and separately based on the topology message
+            timeout for the topology implementing the DRPC function.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>transactional.zookeeper.root</name>
+        <value>/transactional</value>
+        <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.servers</name>
+        <value>null</value>
+        <description>The list of zookeeper servers in which to keep the transactional state. If null (which is the default),
+            it will use storm.zookeeper.servers.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>transactional.zookeeper.port</name>
+        <value>null</value>
+        <description>The port to use to connect to the transactional zookeeper servers. If null (which is the default),
+            it will use storm.zookeeper.port.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
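+    <!-- Illustrative only, not part of the shipped defaults: to keep transactional state on a
+         dedicated ZooKeeper ensemble instead of falling back to storm.zookeeper.servers and
+         storm.zookeeper.port, a storm.yaml override along these lines could be used (host
+         names are hypothetical):
+
+         transactional.zookeeper.servers: ["zk-txn-1.example.com", "zk-txn-2.example.com"]
+         transactional.zookeeper.port: 2182
+    -->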
+    <property>
+        <name>supervisor.slots.ports</name>
+        <value>[6700, 6701]</value>
+        <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
+            the supervisor will only run one worker per port. Use this configuration to tune
+            how many workers run on each machine.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
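+    <!-- Illustrative only: each listed port can host exactly one worker process, so a node
+         sized for four workers would widen the list in storm.yaml, for example:
+
+         supervisor.slots.ports: [6700, 6701, 6702, 6703]
+    -->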
+
+    <property>
+        <name>supervisor.worker.start.timeout.secs</name>
+        <value>120</value>
+        <description>How long a worker can go without heartbeating during the initial launch before
+            the supervisor tries to restart the worker process. This value overrides
+            supervisor.worker.timeout.secs during launch because there is additional
+            overhead to starting and configuring the JVM on launch.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.worker.timeout.secs</name>
+        <value>30</value>
+        <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.monitor.frequency.secs</name>
+        <value>3</value>
+        <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>supervisor.heartbeat.frequency.secs</name>
+        <value>5</value>
+        <description>How often the supervisor sends a heartbeat to the master.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>seconds</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>worker.heartbeat.frequency.secs</name>
+        <value>1</value>
+        <description>How often this worker should heartbeat to the supervisor.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.heartbeat.frequency.secs</name>
+        <value>3</value>
+        <description>How often a task should heartbeat its status to the master.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>task.refresh.poll.secs</name>
+        <value>10</value>
+        <description>How often a task should sync its connections with other tasks (if a task is
+            reassigned, the other tasks sending messages to it need to refresh their connections).
+            In general though, when a reassignment happens other tasks will be notified
+            almost immediately. This configuration is here just in case that notification doesn't
+            come through.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.threads</name>
+        <value>1</value>
+        <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.linger.millis</name>
+        <value>5000</value>
+        <description>How long a connection should retry sending messages to a target host when
+            the connection is closed. This is an advanced configuration and can almost
+            certainly be ignored.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zmq.hwm</name>
+        <value>0</value>
+        <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
+            on the networking layer.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.server_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.client_worker_threads</name>
+        <value>1</value>
+        <description>Netty based messaging: The # of worker threads for the client.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.buffer_size</name>
+        <value>5242880</value>
+        <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>bytes</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_retries</name>
+        <value>30</value>
+        <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.max_wait_ms</name>
+        <value>1000</value>
+        <description>Netty based messaging: The max # of milliseconds that a peer will wait between reconnection retries.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.netty.min_wait_ms</name>
+        <value>100</value>
+        <description>Netty based messaging: The min # of milliseconds that a peer will wait between reconnection retries.</description>
+        <value-attributes>
+            <type>int</type>
+            <unit>ms</unit>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.enable.message.timeouts</name>
+        <value>true</value>
+        <description>Whether Storm should time out messages. Defaults to true. Disabling this is meant to be used
+            in unit tests, to prevent tuples from being accidentally timed out during the test.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.debug</name>
+        <value>false</value>
+        <description>When set to true, Storm will log every message that's emitted.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.optimize</name>
+        <value>true</value>
+        <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.workers</name>
+        <value>1</value>
+        <description>How many processes should be spawned around the cluster to execute this
+            topology. Each process will execute some number of tasks as threads within
+            it. This parameter should be used in conjunction with the parallelism hints
+            on each component in the topology to tune the performance of a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.acker.executors</name>
+        <value>null</value>
+        <description>How many executors to spawn for ackers.
+
+            If this is null, Storm will set the number of acker executors to be equal to the number
+            of workers configured for this topology. If this is set to 0, then Storm will immediately
+            ack tuples as soon as they come off the spout, effectively disabling reliability.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.message.timeout.secs</name>
+        <value>30</value>
+        <description>The maximum amount of time given to the topology to fully process a message
+            emitted by a spout. If the message is not acked within this time frame, Storm
+            will fail the message on the spout. Some spout implementations will then replay
+            the message at a later time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.skip.missing.kryo.registrations</name>
+        <value>false</value>
+        <description>Whether or not Storm should skip loading kryo registrations for which it
+            does not know the class or does not have the serializer implementation. Otherwise, the task
+            will fail to load and will throw an error at runtime. The use case for this is declaring your
+            serializations in the storm.yaml files on the cluster rather than every single time you
+            submit a topology. Different applications may use different serializations, so a single
+            application may not have the code for the serializers used by other apps. By setting this
+            config to true, Storm will ignore the missing serializations rather than throw an error.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
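+    <!-- Illustrative only: the cluster-wide serializations this option refers to are declared
+         via topology.kryo.register in storm.yaml; the class names below are hypothetical.
+         An entry may be a bare class name (serialized with Kryo's default serializer) or a
+         class-to-custom-serializer mapping:
+
+         topology.kryo.register:
+             - com.example.OrderEvent
+             - com.example.UserEvent: com.example.UserEventSerializer
+    -->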
+    <property>
+        <name>topology.max.task.parallelism</name>
+        <value>null</value>
+        <description>The maximum parallelism allowed for a component in this topology. This configuration is
+            typically used in testing to limit the number of threads spawned in local mode.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.spout.pending</name>
+        <value>1000</value>
+        <description>The maximum number of tuples that can be pending on a spout task at any given time.
+            This config applies to individual tasks, not to spouts or topologies as a whole.
+
+            A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+            Note that this config parameter has no effect for unreliable spouts that don't tag
+            their tuples with a message id.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.state.synchronization.timeout.secs</name>
+        <value>60</value>
+        <description>The maximum amount of time a component gives a source of state to synchronize before it requests
+            synchronization again.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.stats.sample.rate</name>
+        <value>0.05</value>
+        <description>The percentage of tuples to sample to produce stats for a task.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.builtin.metrics.bucket.size.secs</name>
+        <value>60</value>
+        <description>The time period that builtin metrics data is bucketed into.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.fall.back.on.java.serialization</name>
+        <value>true</value>
+        <description>Whether or not to use Java serialization in a topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.worker.childopts</name>
+        <value>null</value>
+        <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.executor.receive.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.executor.send.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.receiver.buffer.size</name>
+        <value>8</value>
+        <description>The maximum number of messages to batch from the thread receiving off the network to the
+            executor queues. Must be a power of 2.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.transfer.buffer.size</name>
+        <value>1024</value>
+        <description>The size of the Disruptor transfer queue for each worker.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tick.tuple.freq.secs</name>
+        <value>null</value>
+        <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
+            to tasks. Meant to be used as a component-specific configuration.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.worker.shared.thread.pool.size</name>
+        <value>4</value>
+        <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
+            via the TopologyContext.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.disruptor.wait.strategy</name>
+        <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+        <description>Configure the wait strategy used for internal queuing. Can be used to trade off latency
+            vs. throughput.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>topology.sleep.spout.wait.strategy.time.ms</name>
+        <value>1</value>
+        <description>The number of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.error.throttle.interval.secs</name>
+        <value>10</value>
+        <description>The interval in seconds to use for determining whether to throttle errors reported to Zookeeper. For example,
+            an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
+            reported to Zookeeper per task for every 10 second interval of time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.error.report.per.interval</name>
+        <value>5</value>
+        <description>The maximum number of errors a task may report to Zookeeper within one throttling interval. For example,
+            an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
+            reported to Zookeeper per task for every 10 second interval of time.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
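+    <!-- Worked example of the two settings above: with topology.error.throttle.interval.secs = 10
+         and topology.max.error.report.per.interval = 5, each task can surface at most 5 errors to
+         Zookeeper every 10 seconds (a sustained rate of 0.5 errors per second per task); further
+         errors within the window are throttled. -->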
+
+    <property>
+        <name>topology.trident.batch.emit.interval.millis</name>
+        <value>500</value>
+        <description>How often a batch can be emitted in a Trident topology.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>dev.zookeeper.path</name>
+        <value>/tmp/dev-storm-zookeeper</value>
+        <description>The path to use as the zookeeper dir when running a zookeeper server via
+            "storm dev-zookeeper". This zookeeper instance is only intended for development;
+            it is not a production grade zookeeper setup.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>ui.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm UI Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>ui.filter</name>
+        <value>null</value>
+        <description>Class for Storm UI authentication</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>logviewer.childopts</name>
+        <value>-Xmx128m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for log viewer java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>drpc.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER</value>
+        <description>Childopts for Storm DRPC Java process.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.min.ruid</name>
+        <value>null</value>
+        <description>min.user.id is set to the first real user id on the system. If the value is 'null' then the default value will be taken from the UID_MIN key of /etc/login.defs; otherwise the specified value will be used for all hosts.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.log.dir</name>
+        <value>{{log_dir}}</value>
+        <description>Log directory for Storm.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.authorizer</name>
+        <description>The authorization plugin class used by Nimbus.</description>
+        <depends-on>
+            <property>
+                <type>ranger-storm-plugin-properties</type>
+                <name>ranger-storm-plugin-enabled</name>
+            </property>
+        </depends-on>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>nimbus.seeds</name>
+        <value>localhost</value>
+        <description>Comma-delimited list of the hosts running nimbus server.</description>
+        <value-attributes>
+            <type>componentHosts</type>
+            <editable-only-at-install>true</editable-only-at-install>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.thrift.threads</name>
+        <value>196</value>
+        <description>The number of threads that should be used by the nimbus thrift server.</description>
+        <value-attributes>
+            <type>int</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count.default</name>
+        <value>1</value>
+        <description>Default minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.min.replication.count</name>
+        <value>{{actual_topology_min_replication_count}}</value>
+        <description>Calculated minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec.default</name>
+        <value>60</value>
+        <description>Default maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count has not been achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.max.replication.wait.time.sec</name>
+        <value>{{actual_topology_max_replication_wait_time_sec}}</value>
+        <description>Calculated maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time has elapsed, nimbus will go ahead and perform topology activation tasks even if the required nimbus.min.replication.count has not been achieved.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>storm.thrift.transport</name>
+        <value>{{storm_thrift_transport}}</value>
+        <description>The transport plug-in that is used for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.nonsecure.transport</name>
+        <value>org.apache.storm.security.auth.SimpleTransportPlugin</value>
+        <description>The transport plug-in that is used in non-secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>_storm.thrift.secure.transport</name>
+        <value>org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin</value>
+        <description>The transport plug-in that is used in secure mode for Thrift client/server communication.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>storm.messaging.transport</name>
+        <value>org.apache.storm.messaging.netty.Context</value>
+        <description>The transporter for communication among Storm tasks.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.topology.validator</name>
+        <value>org.apache.storm.nimbus.DefaultTopologyValidator</value>
+        <description>A custom class that implements ITopologyValidator that is run whenever a
+            topology is submitted. Can be used to provide business-specific logic for
+            whether topologies are allowed to run or not.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.spout.wait.strategy</name>
+        <value>org.apache.storm.spout.SleepSpoutWaitStrategy</value>
+        <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
+            triggered in one of two conditions:
+
+            1. nextTuple emits no tuples
+            2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.kryo.factory</name>
+        <value>org.apache.storm.serialization.DefaultKryoFactory</value>
+        <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
+            topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
+            implements topology.fall.back.on.java.serialization and turns references off.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.tuple.serializer</name>
+        <value>org.apache.storm.serialization.types.ListDelegateSerializer</value>
+        <description>The serializer class for ListDelegate (tuple payload).
+            The default serializer will be ListDelegateSerializer</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>client.jartransformer.class</name>
+        <description>Storm Topology backward compatibility transformer</description>
+        <value>org.apache.storm.hack.StormShadeTransformer</value>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.authorizer</name>
+        <description>
+            To ensure only authorized users can perform impersonation, you should start nimbus with nimbus.impersonation.authorizer set to org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer.
+            A storm client may submit requests on behalf of another user. For example, if userX submits an oozie workflow, and as part of workflow execution user oozie wants to submit a topology on behalf of userX, it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, you can use the StormSubmitter.submitTopologyAs API. Alternatively you can use NimbusClient.getConfiguredClientAs to get a nimbus client as some other user and perform any nimbus action (i.e. kill/rebalance/activate/deactivate) using this client.
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.impersonation.acl</name>
+        <description>
+            The ImpersonationAuthorizer uses nimbus.impersonation.acl as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
+            nimbus.impersonation.acl:
+                impersonating_user1:
+                    hosts:
+                        [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+                    groups:
+                        [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+                impersonating_user2:
+                    hosts:
+                        [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+                    groups:
+                        [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+        <on-ambari-upgrade add="false"/>
+    </property>
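+    <!-- Illustrative only: a concrete storm.yaml ACL letting a hypothetical 'oozie' user
+         impersonate members of the 'workflow' group from two (hypothetical) hosts:
+
+         nimbus.impersonation.acl:
+             oozie:
+                 hosts: ["oozie-1.example.com", "oozie-2.example.com"]
+                 groups: ["workflow"]
+    -->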
+
+    <!-- Deleted configs. -->
+
+    <property>
+        <name>storm.cluster.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.consumer.register</name>
+        <value>[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", "parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", "__process-latency", "__receive\\.population$", "__sendqueue\\.population$", "__execute-count", "__emit-count", "__ack-count", "__fail-count", "memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", "GC/.+\\.count$", "GC/.+\\.timeMs$"]}]</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.aggregate.per.worker</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.aggregate.metric.evict.secs</name>
+        <value>5</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.expand.map.type</name>
+        <value>true</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>topology.metrics.metric.name.separator</name>
+        <value>.</value>
+        <description></description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+    <property>
+        <name>java.library.path</name>
+        <value>/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib</value>
+        <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
+            for the java.library.path value. java.library.path tells the JVM where
+            to look for native libraries. It is necessary to set this config correctly since
+            Storm uses the ZeroMQ and JZMQ native libs. </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>nimbus.childopts</name>
+        <value>-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>worker.childopts</name>
+        <value>-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+        <description>The jvm opts provided to workers launched by this supervisor. All "%ID%" substrings are replaced with an identifier for this worker.</description>
+        <value-attributes>
+            <type>multiLine</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
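+    <!-- Illustrative only: every "%ID%" in the value above is replaced with an identifier for
+         the worker (in practice its slot port), so a worker on port 6700 would be launched with
+         ...,process=Worker_6700_JVM as its jmxetric process name. -->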
+    <property>
+        <name>supervisor.childopts</name>
+        <value>-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+        <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+        <value-attributes>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml
new file mode 100644
index 0000000..46291f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-worker-log4j.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+  <property>
+    <name>storm_wrkr_a1_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_a1_maxbackupindex</name>
+    <value>9</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_out_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Standard out Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_out_maxbackupindex</name>
+    <value>4</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Standard out Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_err_maxfilesize</name>
+    <value>100</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Storm Worker Standard Error Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>storm_wrkr_err_maxbackupindex</name>
+    <value>4</value>
+    <description>The number of backup files</description>
+    <display-name>Storm Worker Standard Error Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>storm-worker-log4j template</display-name>
+    <description>Custom worker.xml</description>
+    <value><![CDATA[
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration monitorInterval="60">
+<properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+    <property name="patternNoTime">%msg%n</property>
+    <property name="patternMetrics">%d %-8r %m%n</property>
+</properties>
+<appenders>
+    <RollingFile name="A1"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz">
+        <PatternLayout>
+            <pattern>${pattern}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_a1_maxbackupindex}}"/>
+    </RollingFile>
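+    <!-- With the size policy and rollover strategy above, once the active file exceeds
+         {{storm_wrkr_a1_maxfilesize}} MB it is rolled to a numbered, gzipped backup
+         (the .%i.gz pattern), and at most {{storm_wrkr_a1_maxbackupindex}} backups are
+         kept before the oldest is deleted. -->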
+    <RollingFile name="STDOUT"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz">
+        <PatternLayout>
+            <pattern>${patternNoTime}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_out_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_out_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="STDERR"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz">
+        <PatternLayout>
+            <pattern>${patternNoTime}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="{{storm_wrkr_err_maxfilesize}} MB"/> <!-- Or every 100 MB -->
+        </Policies>
+        <DefaultRolloverStrategy max="{{storm_wrkr_err_maxbackupindex}}"/>
+    </RollingFile>
+    <RollingFile name="METRICS"
+		fileName="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.metrics"
+		filePattern="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.metrics.%i.gz">
+        <PatternLayout>
+            <pattern>${patternMetrics}</pattern>
+        </PatternLayout>
+        <Policies>
+            <SizeBasedTriggeringPolicy size="2 MB"/>
+        </Policies>
+        <DefaultRolloverStrategy max="9"/>
+    </RollingFile>
+    <Syslog name="syslog" format="RFC5424" charset="UTF-8" host="localhost" port="514"
+        protocol="UDP" appName="[${sys:storm.id}:${sys:worker.port}]" mdcId="mdc" includeMDC="true"
+        facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
+        messageId="[${sys:user.name}:${sys:logging.sensitivity}]" id="storm" immediateFail="true" immediateFlush="true"/>
+</appenders>
+<loggers>
+    <root level="info"> <!-- We log everything -->
+        <appender-ref ref="A1"/>
+        <appender-ref ref="syslog"/>
+    </root>
+    <Logger name="org.apache.storm.metric.LoggingMetricsConsumer" level="info" additivity="false">
+        <appender-ref ref="METRICS"/>
+    </Logger>
+    <Logger name="STDERR" level="INFO">
+        <appender-ref ref="STDERR"/>
+        <appender-ref ref="syslog"/>
+    </Logger>
+    <Logger name="STDOUT" level="INFO">
+        <appender-ref ref="STDOUT"/>
+        <appender-ref ref="syslog"/>
+    </Logger>
+</loggers>
+</configuration>
+    ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json
new file mode 100644
index 0000000..a034411
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/kerberos.json
@@ -0,0 +1,134 @@
+{
+  "services": [
+    {
+      "name": "STORM",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "storm_components",
+          "principal": {
+            "value": "${storm-env/storm_user}${principal_suffix}@${realm}",
+            "type": "user",
+            "configuration": "storm-env/storm_principal_name"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/storm.headless.keytab",
+            "owner": {
+              "name": "${storm-env/storm_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "storm-env/storm_keytab"
+          }
+        },
+        {
+          "name": "/STORM/storm_components",
+          "principal": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+          },
+          "keytab": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "storm-site": {
+            "nimbus.authorizer": "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer",
+            "drpc.authorizer": "org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
+            "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
+            "storm.principal.tolocal": "org.apache.storm.security.auth.KerberosPrincipalToLocal",
+            "supervisor.enable": "true",
+            "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
+            "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
+            "nimbus.impersonation.authorizer": "org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer",
+            "nimbus.impersonation.acl": "{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}",
+            "nimbus.admins": "['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']",
+            "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
+            "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}"
+          }
+        },
+        {
+          "ranger-storm-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "STORM_UI_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "storm-env/storm_ui_principal_name"
+              },
+              "keytab": {
+                "configuration": "storm-env/storm_ui_keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NIMBUS",
+          "identities": [
+            {
+              "name": "nimbus_server",
+              "principal": {
+                "value": "nimbus/_HOST@${realm}",
+                "type": "service",
+                "configuration": "storm-env/nimbus_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nimbus.service.keytab",
+                "owner": {
+                  "name": "${storm-env/storm_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "storm-env/nimbus_keytab"
+              }
+            },
+            {
+              "name": "/STORM/storm_components",
+              "principal": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DRPC_SERVER",
+          "identities": [
+            {
+              "name": "drpc_server",
+              "reference": "/STORM/NIMBUS/nimbus_server"
+            }
+          ]
+        },
+        {
+          "name" : "SUPERVISOR"
+        }
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml
new file mode 100644
index 0000000..1bc23e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metainfo.xml
@@ -0,0 +1,179 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <displayName>Storm</displayName>
+      <comment>Apache Hadoop Stream processing framework</comment>
+      <version>1.0.1.3.0</version>
+      <components>
+
+        <component>
+          <name>NIMBUS</name>
+          <displayName>Nimbus</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/nimbus.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_nimbus</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>SUPERVISOR</name>
+          <displayName>Supervisor</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/supervisor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <bulkCommands>
+            <displayName>Supervisors</displayName>
+            <masterComponent>SUPERVISOR</masterComponent>
+          </bulkCommands>
+          <logs>
+            <log>
+              <logId>storm_supervisor</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>storm_worker</logId>
+            </log>
+            <log>
+              <logId>storm_logviewer</logId>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>STORM_UI_SERVER</name>
+          <displayName>Storm UI Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/ui_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_ui</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>DRPC_SERVER</name>
+          <displayName>DRPC Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/drpc_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>storm_drpc</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>storm_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>storm-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>storm-site</config-type>
+        <config-type>storm-env</config-type>
+        <config-type>ranger-storm-plugin-properties</config-type>
+        <config-type>ranger-storm-audit</config-type>
+        <config-type>ranger-storm-policymgr-ssl</config-type>
+        <config-type>ranger-storm-security</config-type>
+        <config-type>admin-properties</config-type>
+        <config-type>ranger-ugsync-site</config-type>
+        <config-type>ranger-admin-site</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+        <config-type>application.properties</config-type>
+        <config-type>storm-atlas-application.properties</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json
new file mode 100644
index 0000000..2c27d58
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/metrics.json
@@ -0,0 +1,1202 @@
+{
+  "STORM_UI_SERVER": {
+    "Component": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties": {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "ui.port",
+          "protocol": "http",
+          "https_port_property_name" : "ui.https.port",
+          "https_property_name" : "ui.https.keystore.type"
+        },
+        "metrics": {
+          "default": {
+            "metrics/api/v1/cluster/summary/tasksTotal": {
+              "metric": "/api/v1/cluster/summary##tasksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/topology/summary": {
+              "metric": "/api/v1/topology/summary?field=topologies##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsTotal": {
+              "metric": "/api/v1/cluster/summary##slotsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsFree": {
+              "metric": "/api/v1/cluster/summary##slotsFree",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/supervisors": {
+              "metric": "/api/v1/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/executorsTotal": {
+              "metric": "/api/v1/cluster/summary##executorsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsUsed": {
+              "metric": "/api/v1/cluster/summary##slotsUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/nimbus/summary": {
+              "metric": "/api/v1/nimbus/summary?field=nimbuses##nimbuses",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "org.apache.ambari.server.controller.metrics.RestMetricsPropertyProvider",
+        "properties": {
+          "default_port": "8744",
+          "port_config_type": "storm-site",
+          "port_property_name": "ui.port",
+          "protocol": "http",
+          "https_port_property_name" : "ui.https.port",
+          "https_property_name" : "ui.https.keystore.type"
+        },
+        "metrics": {
+          "default": {
+            "metrics/api/v1/cluster/summary/tasksTotal": {
+              "metric": "/api/v1/cluster/summary##tasksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/topology/summary": {
+              "metric": "/api/v1/topology/summary?field=topologies##topologies",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsTotal": {
+              "metric": "/api/v1/cluster/summary##slotsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsFree": {
+              "metric": "/api/v1/cluster/summary##slotsFree",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/supervisors": {
+              "metric": "/api/v1/cluster/summary##supervisors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/executorsTotal": {
+              "metric": "/api/v1/cluster/summary##executorsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/cluster/summary/slotsUsed": {
+              "metric": "/api/v1/cluster/summary##slotsUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/api/v1/nimbus/summary": {
+              "metric": "/api/v1/nimbus/summary?field=nimbuses##nimbuses",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "NIMBUS": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/storm/nimbus/freeslots": {
+              "metric": "Free Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/supervisors": {
+              "metric": "Supervisors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/topologies": {
+              "metric": "Topologies",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalexecutors": {
+              "metric": "Total Executors",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totalslots": {
+              "metric": "Total Slots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/totaltasks": {
+              "metric": "Total Tasks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/usedslots": {
+              "metric": "Used Slots",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/committed": {
+              "metric": "Nimbus.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/init": {
+              "metric": "Nimbus.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/max": {
+              "metric": "Nimbus.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/heap/used": {
+              "metric": "Nimbus.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/committed": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/init": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/max": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/memory/nonheap/used": {
+              "metric": "Nimbus.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/os/processcputime": {
+              "metric": "Nimbus.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/daemonthreadcount": {
+              "metric": "Nimbus.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/nimbus/jvm/threading/threadcount": {
+              "metric": "Nimbus.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  },
+  "SUPERVISOR": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric":true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/committed": {
+              "metric": "Supervisor.JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/init": {
+              "metric": "Supervisor.JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/max": {
+              "metric": "Supervisor.JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/heap/used": {
+              "metric": "Supervisor.JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/committed": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/init": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/max": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/memory/nonheap/used": {
+              "metric": "Supervisor.JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/os/processcputime": {
+              "metric": "Supervisor.JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/daemonthreadcount": {
+              "metric": "Supervisor.JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/supervisor/jvm/threading/threadcount": {
+              "metric": "Supervisor.JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/init": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/max": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/heap/used": {
+              "metric": "Worker.(.+).JVM.Memory.Heap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/committed": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.committed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/init": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.init",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/max": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/memory/nonheap/used": {
+              "metric": "Worker.(.+).JVM.Memory.NonHeap.used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/os/processcputime": {
+              "metric": "Worker.(.+).JVM.OS.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/daemonthreadcount": {
+              "metric": "Worker.(.+).JVM.Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/storm/worker/$1/jvm/threading/threadcount": {
+              "metric": "Worker.(.+).JVM.Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py
new file mode 100644
index 0000000..a698415
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/alerts/check_supervisor_process_win.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.functions import check_windows_service_status
+
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return ()
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
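+  # check_windows_service_status raises when the service is not running,
+  # so the try/except maps service state directly to OK/CRITICAL.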
+  try:
+    check_windows_service_status("supervisor")
+    return (RESULT_CODE_OK, ["Supervisor is running"])
+  except Exception:
+    return (RESULT_CODE_CRITICAL, ["Supervisor is stopped"])
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar
new file mode 100644
index 0000000..aed64be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/files/wordCount.jar
Binary files differ
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py
new file mode 100644
index 0000000..f991e71
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/drpc_server.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+
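+# Lifecycle handler for the Storm DRPC server. Ambari invokes install/
+# configure/start/stop/status on this Script subclass; start and stop
+# delegate to the shared service() helper with the "drpc" component name.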
+class DrpcServer(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("drpc", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("drpc", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_drpc)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_drpc]
+
+if __name__ == "__main__":
+  DrpcServer().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py
new file mode 100644
index 0000000..360af5d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+from setup_ranger_storm import setup_ranger_storm
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+class Nimbus(Script):
+  def get_component_name(self):
+    return "storm-nimbus"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("nimbus")
+
+
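+# Ambari selects the concrete implementation by OS family at runtime:
+# NimbusDefault drives the Linux daemon, while NimbusWindows (below)
+# manages the Nimbus Windows service.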
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class NimbusDefault(Nimbus):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    setup_ranger_storm(upgrade_type=upgrade_type)
+    service("nimbus", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service("nimbus", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_nimbus)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_nimbus]
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class NimbusWindows(Nimbus):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.nimbus_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.nimbus_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    env.set_params(status_params)
+    check_windows_service_status(status_params.nimbus_win_service_name)
+
+if __name__ == "__main__":
+  Nimbus().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py
new file mode 100644
index 0000000..39bda4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus_prod.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from storm import storm
+from supervisord_service import supervisord_service, supervisord_check_status
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
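+# "Production" variant of the Nimbus handler: instead of launching the
+# daemon directly, it delegates start/stop/status to supervisord via the
+# supervisord_service/supervisord_check_status helpers, so supervisord
+# keeps Nimbus running.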
+class Nimbus(Script):
+
+  def get_component_name(self):
+    return "storm-nimbus"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    supervisord_service("nimbus", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    supervisord_service("nimbus", action="stop")
+
+  def status(self, env):
+    supervisord_check_status("nimbus")
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+if __name__ == "__main__":
+  Nimbus().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py
new file mode 100644
index 0000000..fa3112d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/pacemaker.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions.security_commons import build_expectations, \
+    cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+    FILE_TYPE_JAAS_CONF
+
+class PaceMaker(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("pacemaker", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("pacemaker", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_pacemaker)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_pacemaker]
+
+if __name__ == "__main__":
+  PaceMaker().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py
new file mode 100644
index 0000000..5d53de8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+retryAble = default("/commandParams/command_retry_enabled", False)
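+# default() resolves the given path inside the command's JSON configuration and
+# returns the fallback (False here) when the key is absent.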
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..78ec165
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_linux.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same function set.
+
+import status_params
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons import yaml_utils
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+from resource_management.libraries.functions import is_empty
+from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root = status_params.stack_root
+sudo = AMBARI_SUDO_BINARY
+
+limits_conf_dir = "/etc/security/limits.d"
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+stack_name = status_params.stack_name
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+version = default("/commandParams/version", None)
+
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+storm_component_home_dir = status_params.storm_component_home_dir
+conf_dir = status_params.conf_dir
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_supports_ru = stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted)
+stack_supports_storm_kerberos = stack_version_formatted and check_stack_feature(StackFeature.STORM_KERBEROS, stack_version_formatted)
+stack_supports_storm_ams = stack_version_formatted and check_stack_feature(StackFeature.STORM_AMS, stack_version_formatted)
+stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, stack_version_formatted)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# default hadoop params
+rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
+storm_bin_dir = "/usr/bin"
+storm_lib_dir = "/usr/lib/storm/lib/"
+
+# hadoop parameters for 2.2+
+if stack_supports_ru:
+  rest_lib_dir = format("{storm_component_home_dir}/contrib/storm-rest")
+  storm_bin_dir = format("{storm_component_home_dir}/bin")
+  storm_lib_dir = format("{storm_component_home_dir}/lib")
+  log4j_dir = format("{storm_component_home_dir}/log4j2")
+
+storm_user = config['configurations']['storm-env']['storm_user']
+log_dir = config['configurations']['storm-env']['storm_log_dir']
+pid_dir = status_params.pid_dir
+local_dir = config['configurations']['storm-site']['storm.local.dir']
+user_group = config['configurations']['cluster-env']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+jps_binary = format("{java64_home}/bin/jps")
+nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
+storm_zookeeper_root_dir = default('/configurations/storm-site/storm.zookeeper.root', None)
+storm_zookeeper_servers = config['configurations']['storm-site']['storm.zookeeper.servers']
+storm_zookeeper_port = config['configurations']['storm-site']['storm.zookeeper.port']
+storm_logs_supported = config['configurations']['storm-env']['storm_logs_supported']
+
+# nimbus.seeds is supported in HDP 2.3.0.0 and higher
+nimbus_seeds_supported = default('/configurations/storm-env/nimbus_seeds_supported', False)
+nimbus_host = default('/configurations/storm-site/nimbus.host', None)
+nimbus_seeds = default('/configurations/storm-site/nimbus.seeds', None)
+default_topology_max_replication_wait_time_sec = default('/configurations/storm-site/topology.max.replication.wait.time.sec.default', -1)
+nimbus_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+default_topology_min_replication_count = default('/configurations/storm-site/topology.min.replication.count.default', 1)
+
+#Calculate topology.max.replication.wait.time.sec and topology.min.replication.count
+if len(nimbus_hosts) > 1:
+  # for HA Nimbus
+  actual_topology_max_replication_wait_time_sec = -1
+  actual_topology_min_replication_count = len(nimbus_hosts) / 2 + 1
+else:
+  # for non-HA Nimbus
+  actual_topology_max_replication_wait_time_sec = default_topology_max_replication_wait_time_sec
+  actual_topology_min_replication_count = default_topology_min_replication_count
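+# Worked example: with nimbus_hosts = ['n1', 'n2', 'n3'] (hostnames illustrative),
+# the HA branch yields actual_topology_min_replication_count = 3 / 2 + 1 = 2 under
+# Python 2 integer division, and a wait time of -1 (wait until replication completes).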
+
+if 'topology.max.replication.wait.time.sec.default' in config['configurations']['storm-site']:
+  del config['configurations']['storm-site']['topology.max.replication.wait.time.sec.default']
+if 'topology.min.replication.count.default' in config['configurations']['storm-site']:
+  del config['configurations']['storm-site']['topology.min.replication.count.default']
+
+rest_api_port = "8745"
+rest_api_admin_port = "8746"
+rest_api_conf_file = format("{conf_dir}/config.yaml")
+storm_env_sh_template = config['configurations']['storm-env']['content']
+jmxremote_port = config['configurations']['storm-env']['jmxremote_port']
+
+if 'ganglia_server_host' in config['clusterHostInfo'] and len(config['clusterHostInfo']['ganglia_server_host'])>0:
+  ganglia_installed = True
+  ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
+  ganglia_report_interval = 60
+else:
+  ganglia_installed = False
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts", [])
+
+storm_user_nofile_limit = default('/configurations/storm-env/storm_user_nofile_limit', 128000)
+storm_user_nproc_limit = default('/configurations/storm-env/storm_user_noproc_limit', 65536)
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _storm_principal_name = config['configurations']['storm-env']['storm_principal_name']
+  storm_jaas_principal = _storm_principal_name.replace('_HOST',_hostname_lowercase)
+  _ambari_principal_name = default('/configurations/cluster-env/ambari_principal_name', None)
+  storm_keytab_path = config['configurations']['storm-env']['storm_keytab']
+
+  if stack_supports_storm_kerberos:
+    storm_ui_keytab_path = config['configurations']['storm-env']['storm_ui_keytab']
+    _storm_ui_jaas_principal_name = config['configurations']['storm-env']['storm_ui_principal_name']
+    storm_ui_jaas_principal = _storm_ui_jaas_principal_name.replace('_HOST',_hostname_lowercase)
+    storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
+    if _ambari_principal_name:
+      ambari_bare_jaas_principal = get_bare_principal(_ambari_principal_name)
+    _nimbus_principal_name = config['configurations']['storm-env']['nimbus_principal_name']
+    nimbus_jaas_principal = _nimbus_principal_name.replace('_HOST', _hostname_lowercase)
+    nimbus_bare_jaas_principal = get_bare_principal(_nimbus_principal_name)
+    nimbus_keytab_path = config['configurations']['storm-env']['nimbus_keytab']
+
+kafka_bare_jaas_principal = None
+if stack_supports_storm_kerberos:
+  if security_enabled:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.secure.transport']
+    # generate KafkaClient JAAS config if Kafka is kerberized
+    _kafka_principal_name = default("/configurations/kafka-env/kafka_principal_name", None)
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+  else:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
+
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+has_metric_collector = not len(ams_collector_hosts) == 0
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
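+    # For example, a webapp address of "metrics.example.com:6188" (host illustrative)
+    # yields metric_collector_port = "6188"; an address without a colon falls back to '6188'.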
+
+  metric_collector_report_interval = 60
+  metric_collector_app_id = "nimbus"
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
+metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = ""
+if storm_zookeeper_servers:
+  storm_zookeeper_servers_list = yaml_utils.get_values_from_yaml_array(storm_zookeeper_servers)
+  zookeeper_quorum = (":" + storm_zookeeper_port + ",").join(storm_zookeeper_servers_list)
+  zookeeper_quorum += ":" + storm_zookeeper_port
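+# For example, servers ['zk1', 'zk2'] (names illustrative) and port "2181" yield
+# zookeeper_quorum = "zk1:2181,zk2:2181".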
+
+jar_jvm_opts = ''
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks
+storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+
+if enable_atlas_hook:
+  # Only append /etc/atlas/conf to classpath if on HDP 2.4.*
+  if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
+    atlas_conf_dir = format('{stack_root}/current/atlas-server/conf')
+    jar_jvm_opts += '-Datlas.conf=' + atlas_conf_dir
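+    # e.g. with a stack root of /usr/hdp (illustrative), jar_jvm_opts becomes
+    # "-Datlas.conf=/usr/hdp/current/atlas-server/conf"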
+#endregion
+
+storm_ui_port = config['configurations']['storm-site']['ui.port']
+
+#Storm log4j properties
+storm_a1_maxfilesize = default('/configurations/storm-cluster-log4j/storm_a1_maxfilesize', 100)
+storm_a1_maxbackupindex = default('/configurations/storm-cluster-log4j/storm_a1_maxbackupindex', 9)
+storm_wrkr_a1_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxfilesize', 100)
+storm_wrkr_a1_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_a1_maxbackupindex', 9)
+storm_wrkr_out_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxfilesize', 100)
+storm_wrkr_out_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_out_maxbackupindex', 4)
+storm_wrkr_err_maxfilesize = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxfilesize', 100)
+storm_wrkr_err_maxbackupindex = default('/configurations/storm-worker-log4j/storm_wrkr_err_maxbackupindex', 4)
+
+storm_cluster_log4j_content = config['configurations']['storm-cluster-log4j']['content']
+storm_worker_log4j_content = config['configurations']['storm-worker-log4j']['content']
+
+# some commands may need to supply the JAAS location when running as storm
+storm_jaas_file = format("{conf_dir}/storm_jaas.conf")
+
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger storm plugin start section
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# Ranger xml_configuration support flag: determined via stack feature instead of depending on xml_configurations_supported in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger storm plugin enabled property
+enable_ranger_storm = default("/configurations/ranger-storm-plugin-properties/ranger-storm-plugin-enabled", "No")
+enable_ranger_storm = True if enable_ranger_storm.lower() == 'yes' else False
+
+# ranger storm properties
+if enable_ranger_storm:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-storm-security']['ranger.plugin.storm.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger storm service name
+  repo_name = str(config['clusterName']) + '_storm'
+  repo_name_value = config['configurations']['ranger-storm-security']['ranger.plugin.storm.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-storm-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_storm:
+    external_admin_username = default('/configurations/ranger-storm-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-storm-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
+  policy_user = storm_user
+  repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{storm_component_home_dir}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{storm_component_home_dir}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
+
+  storm_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'nimbus.url': 'http://' + storm_ui_host[0].lower() + ':' + str(storm_ui_port),
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  storm_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(storm_ranger_plugin_config),
+    'description': 'storm repo',
+    'name': repo_name,
+    'repositoryType': 'storm',
+    'assetType': '6'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    storm_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    policy_user = format('{storm_user},{storm_bare_jaas_principal}')
+    storm_ranger_plugin_config['policy.download.auth.users'] = policy_user
+    storm_ranger_plugin_config['tag.download.auth.users'] = policy_user
+    storm_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    storm_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': storm_ranger_plugin_config,
+      'description': 'storm repo',
+      'name': repo_name,
+      'type': 'storm'
+    }
+
+  ranger_storm_principal = None
+  ranger_storm_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled:
+    ranger_storm_principal = storm_jaas_principal
+    ranger_storm_keytab = storm_keytab_path
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-storm-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = default('/configurations/ranger-storm-audit/xasecure.audit.destination.hdfs', False)
+  ssl_keystore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# ranger storm plugin end section
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_hosts) == 0
+
+availableServices = config['availableServices']
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+import functools
+# Create a partial function that pre-binds the common arguments for every HdfsResource call.
+# To create/delete an HDFS directory/file or copy from local, call params.HdfsResource in code.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
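+# Usage sketch (mirrors setup_ranger_storm.py below): queue directory creation with
+#   params.HdfsResource("/ranger/audit", type="directory", action="create_on_execute", ...)
+# and flush all queued operations with params.HdfsResource(None, action="execute").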
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..a758375
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/params_windows.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from status_params import *
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+
+# server configurations
+config = Script.get_config()
+
+stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
+
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+conf_dir = os.environ["STORM_CONF_DIR"]
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+storm_user = hadoop_user
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+default_topology_max_replication_wait_time_sec = default('/configurations/storm-site/topology.max.replication.wait.time.sec.default', -1)
+nimbus_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+default_topology_min_replication_count = default('/configurations/storm-site/topology.min.replication.count.default', 1)
+
+#Calculate topology.max.replication.wait.time.sec and topology.min.replication.count
+if len(nimbus_hosts) > 1:
+  # for HA Nimbus
+  actual_topology_max_replication_wait_time_sec = -1
+  actual_topology_min_replication_count = len(nimbus_hosts) / 2 + 1
+else:
+  # for non-HA Nimbus
+  actual_topology_max_replication_wait_time_sec = default_topology_max_replication_wait_time_sec
+  actual_topology_min_replication_count = default_topology_min_replication_count
+
+if stack_is_hdp23_or_further:
+  if security_enabled:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.secure.transport']
+  else:
+    storm_thrift_transport = config['configurations']['storm-site']['_storm.thrift.nonsecure.transport']
+
+service_map = {
+  "nimbus" : nimbus_win_service_name,
+  "supervisor" : supervisor_win_service_name,
+  "ui" : ui_win_service_name
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py
new file mode 100644
index 0000000..f9b3b80
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/rest_api.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class StormRestApi(Script):
+  """
+  Storm REST API.
+  It was available in HDP 2.0 and 2.1.
+  In HDP 2.2, it was removed since the functionality was moved to Storm UI Server.
+  """
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("rest_api", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("rest_api", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_rest_api)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_rest_api]
+  
+if __name__ == "__main__":
+  StormRestApi().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py
new file mode 100644
index 0000000..b5e5cd5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Execute
+from resource_management.core.resources import File
+from resource_management.core.shell import as_user
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_user_call_output
+from resource_management.libraries.functions.show_logs import show_logs
+import time
+
+
+def service(name, action = 'start'):
+  import params
+  import status_params
+
+  pid_file = status_params.pid_files[name]
+  no_op_test = as_user(format(
+    "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.storm_user)
+
+  if name == 'ui':
+    process_grep = "storm.ui.core$"
+  elif name == "rest_api":
+    process_grep = format("{rest_lib_dir}/storm-rest-.*\.jar$")
+  else:
+    process_grep = format("storm.daemon.{name}$")
+
+  find_proc = format("{jps_binary} -l  | grep {process_grep}")
+  write_pid = format("{find_proc} | awk {{'print $1'}} > {pid_file}")
+  crt_pid_cmd = format("{find_proc} && {write_pid}")
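+  # e.g. for name = "nimbus", find_proc greps `jps -l` output for "storm.daemon.nimbus$"
+  # and crt_pid_cmd writes the matching pid into the nimbus pid file.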
+  storm_env = format(
+    "source {conf_dir}/storm-env.sh ; export PATH=$JAVA_HOME/bin:$PATH")
+
+  if action == "start":
+    if name == "rest_api":
+      process_cmd = format(
+        "{storm_env} ; java -jar {rest_lib_dir}/`ls {rest_lib_dir} | grep -wE storm-rest-[0-9.-]+\.jar` server")
+      cmd = format(
+        "{process_cmd} {rest_api_conf_file} > {log_dir}/restapi.log 2>&1")
+    else:
+      # The Storm start script forks into the actual Storm Java process,
+      # so the pid of the start script can be used as the pid of the component.
+      cmd = format("{storm_env} ; storm {name} > {log_dir}/{name}.out 2>&1")
+
+    cmd = format("{cmd} &\n echo $! > {pid_file}")
+    
+    Execute(cmd,
+      not_if = no_op_test,
+      user = params.storm_user,
+      path = params.storm_bin_dir,
+    )
+    
+    File(pid_file,
+         owner = params.storm_user,
+         group = params.user_group
+    )
+  elif action == "stop":
+    process_dont_exist = format("! ({no_op_test})")
+    if os.path.exists(pid_file):
+      pid = get_user_call_output.get_user_call_output(format("! test -f {pid_file} ||  cat {pid_file}"), user=params.storm_user)[1]
+
+      # If multiple processes are running (for example, the user can start the
+      # logviewer from the console), there can be more than one pid.
+      pid = pid.replace("\n", " ")
+
+      Execute(format("{sudo} kill {pid}"),
+        not_if = process_dont_exist)
+
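+      # Escalate to SIGKILL only if the process is still alive after the grace
+      # periods (about 2s, then another 20s) checked by not_if below.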
+      Execute(format("{sudo} kill -9 {pid}"),
+        not_if = format(
+          "sleep 2; {process_dont_exist} || sleep 20; {process_dont_exist}"),
+        ignore_failures = True)
+
+      File(pid_file, action = "delete")
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..80ea0f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/service_check.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.core.resources import File
+from resource_management.core.resources import Execute
+from resource_management.libraries.script import Script
+from resource_management.core.source import StaticFile
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+class ServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ServiceCheckWindows(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+    service = "STORM"
+    Execute(format("cmd /C {smoke_cmd} {service}", smoke_cmd=smoke_cmd, service=service), user=params.storm_user, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ServiceCheckDefault(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = get_unique_id_and_date()
+
+    File("/tmp/wordCount.jar",
+         content=StaticFile("wordCount.jar"),
+         owner=params.storm_user
+    )
+
+    cmd = ""
+    if params.nimbus_seeds_supported:
+      # Because this command is guaranteed to run on one of the hosts with storm client, there is no need
+      # to specify "-c nimbus.seeds={nimbus_seeds}"
+      cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique}")
+    elif params.nimbus_host is not None:
+      cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
+
+    Execute(cmd,
+            logoutput=True,
+            path=params.storm_bin_dir,
+            user=params.storm_user
+    )
+
+    Execute(format("storm kill WordCount{unique}"),
+            path=params.storm_bin_dir,
+            user=params.storm_user
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py
new file mode 100644
index 0000000..c04496e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/setup_ranger_storm.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_core_site_for_required_plugins
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources import File, Directory
+
+def setup_ranger_storm(upgrade_type=None):
+  """
+  :param upgrade_type: Upgrade Type such as "rolling" or "nonrolling"
+  """
+  import params
+  if params.enable_ranger_storm and params.security_enabled:
+
+    stack_version = None
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Storm: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_storm and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hdfs_user,
+                           group=params.hdfs_user,
+                           mode=0755,
+                           recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/storm",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.storm_user,
+                           group=params.storm_user,
+                           mode=0700,
+                           recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('storm-nimbus', 'storm', params.previous_jdbc_jar,
+                          params.downloaded_custom_connector, params.driver_curl_source,
+                          params.driver_curl_target, params.java64_home,
+                          params.repo_name, params.storm_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_storm, conf_dict=params.conf_dir,
+                          component_user=params.storm_user, component_group=params.user_group, cache_service_list=['storm'],
+                          plugin_audit_properties=params.config['configurations']['ranger-storm-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-storm-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-storm-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-storm-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-storm-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-storm-policymgr-ssl'],
+                          component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.ranger_storm_principal if params.security_enabled else None,
+                          component_user_keytab=params.ranger_storm_keytab if params.security_enabled else None)
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('storm-nimbus', 'storm', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.storm_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_storm, conf_dict=params.conf_dir,
+                        component_user=params.storm_user, component_group=params.user_group, cache_service_list=['storm'],
+                        plugin_audit_properties=params.config['configurations']['ranger-storm-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-storm-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-storm-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-storm-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-storm-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-storm-policymgr-ssl'],
+                        component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+
+
+    site_files_create_path = format('{storm_component_home_dir}/extlib-daemon/ranger-storm-plugin-impl/conf')
+    Directory(site_files_create_path,
+            owner = params.storm_user,
+            group = params.user_group,
+            mode=0775,
+            create_parents = True,
+            cd_access = 'a'
+            )
+
+    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_storm and params.has_namenode and params.security_enabled:
+      Logger.info("Stack supports core-site.xml creation for Ranger plugin, creating create core-site.xml from namenode configuraitions")
+      setup_core_site_for_required_plugins(component_user=params.storm_user,component_group=params.user_group,create_core_site_path = site_files_create_path, config = params.config)
+      if len(params.namenode_hosts) > 1:
+        Logger.info('Ranger Storm plugin is enabled along with security and NameNode is HA , creating hdfs-site.xml')
+        XmlConfig("hdfs-site.xml",
+          conf_dir=site_files_create_path,
+          configurations=params.config['configurations']['hdfs-site'],
+          configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+          owner=params.storm_user,
+          group=params.user_group,
+          mode=0644
+        )
+      else:
+        Logger.info('NameNode is not HA, removing hdfs-site.xml')
+        File(format('{site_files_create_path}/hdfs-site.xml'), action="delete")
+    else:
+      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
+  else:
+    Logger.info('Ranger Storm plugin is not enabled')
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..d84b095
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/status_params.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from ambari_commons import OSCheck
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'STORM_UI_SERVER' : 'storm-client',
+  'DRPC_SERVER' : 'storm-client',
+  'STORM_SERVICE_CHECK' : 'storm-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "STORM_SERVICE_CHECK")
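+# For example, the role "NIMBUS" maps to "storm-nimbus"; the default key
+# "STORM_SERVICE_CHECK" resolves to "storm-client". The result feeds the
+# <stack-root>/current/<component> paths computed below.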
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  nimbus_win_service_name = "nimbus"
+  supervisor_win_service_name = "supervisor"
+  ui_win_service_name = "ui"
+else:
+  pid_dir = config['configurations']['storm-env']['storm_pid_dir']
+  pid_nimbus = format("{pid_dir}/nimbus.pid")
+  pid_supervisor = format("{pid_dir}/supervisor.pid")
+  pid_drpc = format("{pid_dir}/drpc.pid")
+  pid_ui = format("{pid_dir}/ui.pid")
+  pid_logviewer = format("{pid_dir}/logviewer.pid")
+  pid_rest_api = format("{pid_dir}/restapi.pid")
+
+  pid_files = {
+    "logviewer":pid_logviewer,
+    "ui": pid_ui,
+    "nimbus": pid_nimbus,
+    "supervisor": pid_supervisor,
+    "drpc": pid_drpc,
+    "rest_api": pid_rest_api
+  }
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+
+  storm_component_home_dir = "/usr/lib/storm"
+  conf_dir = "/etc/storm/conf"
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    storm_component_home_dir = format("{stack_root}/current/{component_directory}")
+    conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+  storm_user = config['configurations']['storm-env']['storm_user']
+  storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
+  storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py
new file mode 100644
index 0000000..99579d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File, Link
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+from resource_management.core.source import Template
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm_yaml_utils import yaml_config_template, yaml_config
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def storm(name=None):
+  import params
+  yaml_config("storm.yaml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['storm-site'],
+              owner=params.storm_user
+  )
+
+  if params.service_map.has_key(name):
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.storm_user,
+                  password = Script.get_password(params.storm_user))
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def storm(name=None):
+  import params
+  import os
+
+  Directory(params.log_dir,
+            owner=params.storm_user,
+            group=params.user_group,
+            mode=0777,
+            create_parents = True,
+            cd_access="a",
+  )
+
+  Directory([params.pid_dir, params.local_dir],
+            owner=params.storm_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+  )
+
+  Directory(params.conf_dir,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+  )
+
+  File(format("{limits_conf_dir}/storm.conf"),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("storm.conf.j2")
+  )
+
+  File(format("{conf_dir}/config.yaml"),
+       content=Template("config.yaml.j2"),
+       owner=params.storm_user,
+       group=params.user_group
+  )
+
+  configurations = params.config['configurations']['storm-site']
+
+  File(format("{conf_dir}/storm.yaml"),
+       content=yaml_config_template(configurations),
+       owner=params.storm_user,
+       group=params.user_group
+  )
+
+  File(format("{conf_dir}/storm-env.sh"),
+       owner=params.storm_user,
+       content=InlineTemplate(params.storm_env_sh_template)
+  )
+
+  # Generate atlas-application.properties.xml file and symlink the hook jars
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.conf_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.STORM, params.storm_atlas_application_properties, atlas_hook_filepath, params.storm_user, params.user_group)
+    storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")
+    setup_atlas_jar_symlinks("storm", storm_extlib_dir)
+
+  if params.has_metric_collector:
+    File(format("{conf_dir}/storm-metrics2.properties"),
+        owner=params.storm_user,
+        group=params.user_group,
+        content=Template("storm-metrics2.properties.j2")
+    )
+
+    # Remove any symlinks that may be left over from an upgrade from HDP < 2.2 to HDP >= 2.2
+    Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+         action="delete")
+    # On old HDP 2.1 versions, this symlink may also exist and break Express Upgrade (EU) to newer versions
+    Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar", action="delete")
+
+    if check_stack_feature(StackFeature.STORM_METRICS_APACHE_CLASSES, params.version_for_stack_feature_checks):
+      sink_jar = params.metric_collector_sink_jar
+    else:
+      sink_jar = params.metric_collector_legacy_sink_jar
+
+    Execute(format("{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            only_if=format("ls {sink_jar}")
+    )
+
+  if params.storm_logs_supported:
+    Directory(params.log4j_dir,
+              owner=params.storm_user,
+              group=params.user_group,
+              mode=0755,
+              create_parents = True
+    )
+    
+    File(format("{log4j_dir}/cluster.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_cluster_log4j_content)
+    )
+    File(format("{log4j_dir}/worker.xml"),
+      owner=params.storm_user,
+      content=InlineTemplate(params.storm_worker_log4j_content)
+    )
+
+  if params.security_enabled:
+    TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
+                   owner=params.storm_user
+    )
+    if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
+      TemplateConfig(format("{conf_dir}/client_jaas.conf"),
+                     owner=params.storm_user
+      )
+      minRuid = configurations['_storm.min.ruid'] if '_storm.min.ruid' in configurations else ''
+
+      min_user_ruid = int(minRuid) if minRuid.isdigit() else _find_real_user_min_uid()
+
+      File(format("{conf_dir}/worker-launcher.cfg"),
+           content=Template("worker-launcher.cfg.j2", min_user_ruid = min_user_ruid),
+           owner='root',
+           group=params.user_group
+      )
+
+
+def _find_real_user_min_uid():
+  '''
+  Finds the minimal real user UID by scanning /etc/login.defs for UID_MIN.
+  '''
+  with open('/etc/login.defs') as f:
+    for line in f:
+      if line.strip().startswith('UID_MIN') and len(line.split()) == 2 and line.split()[1].isdigit():
+        return int(line.split()[1])
+  raise Fail("Unable to find UID_MIN in file /etc/login.defs. Expecting format e.g.: 'UID_MIN    500'")  
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py
new file mode 100644
index 0000000..bc245c4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_upgrade.py
@@ -0,0 +1,177 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import os
+
+from ambari_commons import yaml_utils
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+
+class StormUpgrade(Script):
+  """
+  Applies to Rolling/Express Upgrade from HDP 2.1 or 2.2 to 2.3 or higher.
+
+  Requirements: Needs to run from a host with ZooKeeper Client.
+
+  This class helps perform some of the upgrade tasks needed for Storm during
+  a Rolling or Express upgrade. Storm writes data to disk locally and to ZooKeeper.
+  If any HDP 2.1 or 2.2 bits exist in these directories when an HDP 2.3 instance
+  starts up, it will fail to start properly. Because the upgrade framework in
+  Ambari doesn't yet have a mechanism to say "stop all" before starting to
+  upgrade each component, we need to rely on a Storm trick to bring down
+  running daemons. By removing the ZooKeeper data with running daemons, those
+  daemons will die.
+  """
+
+  def delete_storm_zookeeper_data(self, env):
+    """
+    Deletes the Storm data from ZooKeeper, effectively bringing down all
+    Storm daemons.
+    :return:
+    """
+    import params
+
+    Logger.info('Clearing Storm data from ZooKeeper')
+
+    storm_zookeeper_root_dir = params.storm_zookeeper_root_dir
+    if storm_zookeeper_root_dir is None:
+      raise Fail("The storm ZooKeeper directory specified by storm-site/storm.zookeeper.root must be specified")
+
+    # The zookeeper client must be given a zookeeper host to contact. Guaranteed to have at least one host.
+    storm_zookeeper_server_list = yaml_utils.get_values_from_yaml_array(params.storm_zookeeper_servers)
+    if storm_zookeeper_server_list is None:
+      Logger.info("Unable to extract ZooKeeper hosts from '{0}', assuming localhost").format(params.storm_zookeeper_servers)
+      storm_zookeeper_server_list = ["localhost"]
+
+    # For every zk server, try to remove /storm
+    zookeeper_data_cleared = False
+    for storm_zookeeper_server in storm_zookeeper_server_list:
+      # Determine where the zkCli.sh shell script is
+      zk_command_location = os.path.join(params.stack_root, "current", "zookeeper-client", "bin", "zkCli.sh")
+      if params.version is not None:
+        zk_command_location = os.path.join(params.stack_root, params.version, "zookeeper", "bin", "zkCli.sh")
+
+      # create the ZooKeeper delete command
+      command = "{0} -server {1}:{2} rmr /storm".format(
+        zk_command_location, storm_zookeeper_server, params.storm_zookeeper_port)
+
+      # clean out ZK
+      try:
+        # the ZK client requires Java to run; ensure it's on the path
+        env_map = {
+          'JAVA_HOME': params.java64_home
+        }
+
+        # AMBARI-12094: if security is enabled, then we need to tell zookeeper where the
+        # JAAS file is located since we don't use kinit directly with STORM
+        if params.security_enabled:
+          env_map['JVMFLAGS'] = "-Djava.security.auth.login.config={0}".format(params.storm_jaas_file)
+
+        Execute(command, user=params.storm_user, environment=env_map,
+          logoutput=True, tries=1)
+
+        zookeeper_data_cleared = True
+        break
+      except Exception:
+        # the command failed; try the next ZooKeeper server
+        pass
+
+    # fail if the ZK data could not be cleared
+    if not zookeeper_data_cleared:
+      raise Fail("Unable to clear ZooKeeper Storm data on any of the following ZooKeeper hosts: {0}".format(
+        storm_zookeeper_server_list))
+
+
+  def delete_storm_local_data(self, env):
+    """
+    Deletes Storm data from local directories. This will create a marker file
+    with JSON data representing the upgrade stack and request/stage ID. This
+    will prevent multiple Storm components on the same host from removing
+    the local directories more than once.
+    :return:
+    """
+    import params
+
+    Logger.info('Clearing Storm data from local directories...')
+
+    storm_local_directory = params.local_dir
+    if storm_local_directory is None:
+      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")
+
+    request_id = default("/requestId", None)
+
+    stack_name = params.stack_name
+    stack_version = params.version
+    upgrade_direction = params.upgrade_direction
+
+    json_map = {}
+    json_map["requestId"] = request_id
+    json_map["stackName"] = stack_name
+    json_map["stackVersion"] = stack_version
+    json_map["direction"] = upgrade_direction
+
+    temp_directory = params.tmp_dir
+    marker_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))
+    Logger.info("Marker file for upgrade/downgrade of Storm, {0}".format(marker_file))
+
+    if os.path.exists(marker_file):
+      Logger.info("The marker file exists.")
+      try:
+        with open(marker_file) as file_pointer:
+          existing_json_map = json.load(file_pointer)
+
+        if cmp(json_map, existing_json_map) == 0:
+          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for "
+                      "request {2} and direction {3}. Nothing else to do.".format(stack_name, stack_version, request_id, upgrade_direction))
+
+          # Nothing else to do here for this as it appears to have already been
+          # removed by another component being upgraded
+          return
+        else:
+          Logger.info("The marker file differs from the new value. Will proceed to delete Storm local dir, "
+                      "and generate new file. Current marker file: {0}".format(str(existing_json_map)))
+      except Exception, e:
+        Logger.error("The marker file {0} appears to be corrupt; removing it. Error: {1}".format(marker_file, str(e)))
+        File(marker_file, action="delete")
+    else:
+      Logger.info('The marker file {0} does not exist; will attempt to delete local Storm directory if it exists.'.format(marker_file))
+
+    # Delete from local directory
+    if os.path.isdir(storm_local_directory):
+      Logger.info("Deleting storm local directory, {0}".format(storm_local_directory))
+      Directory(storm_local_directory, action="delete", create_parents = True)
+
+    # Recreate storm local directory
+    Logger.info("Recreating storm local directory, {0}".format(storm_local_directory))
+    Directory(storm_local_directory, mode=0755, owner=params.storm_user,
+      group=params.user_group, create_parents = True)
+
+    # Write (or overwrite) the marker file for this request
+    Logger.info("Saving marker file to {0} with contents: {1}".format(marker_file, str(json_map)))
+    with open(marker_file, 'w') as file_pointer:
+      json.dump(json_map, file_pointer, indent=2)
+
+if __name__ == "__main__":
+  StormUpgrade().execute()
\ No newline at end of file
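For reference, the idempotency guard in delete_storm_local_data boils down to comparing the incoming request with the JSON marker already on disk; when they match, another component on this host has already cleaned storm.local.dir. A minimal sketch with invented values (Python 2 cmp(), as in the script):

on_disk  = {"requestId": 42, "stackName": "HDP", "stackVersion": "2.3.0.0", "direction": "UPGRADE"}
incoming = {"requestId": 42, "stackName": "HDP", "stackVersion": "2.3.0.0", "direction": "UPGRADE"}

if cmp(incoming, on_disk) == 0:
  # equal dicts: the delete already ran for this request/stage
  print "Local directories already cleaned; skipping"
else:
  print "New request; delete and recreate storm.local.dir, then rewrite the marker"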
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py
new file mode 100644
index 0000000..9d78e71
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/storm_yaml_utils.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import resource_management
+
+from ambari_commons.yaml_utils import escape_yaml_property
+from resource_management.core.source import InlineTemplate
+from resource_management.core.resources.system import File
+
+def replace_jaas_placeholder(name, security_enabled, conf_dir):
+  if name.find('_JAAS_PLACEHOLDER') > -1:
+    if security_enabled:
+      return name.replace('_JAAS_PLACEHOLDER', '-Djava.security.auth.login.config=' + conf_dir + '/storm_jaas.conf')
+    else:
+      return name.replace('_JAAS_PLACEHOLDER', '')
+  else:
+    return name
+
+storm_yaml_template = """{% for key, value in configurations|dictsort if not key.startswith('_') %}{{key}} : {{ escape_yaml_property(replace_jaas_placeholder(resource_management.core.source.InlineTemplate(value).get_content().strip(), security_enabled, conf_dir)) }}
+{% endfor %}"""
+
+def yaml_config_template(configurations):
+  return InlineTemplate(storm_yaml_template, configurations=configurations,
+                        extra_imports=[escape_yaml_property, replace_jaas_placeholder, resource_management,
+                                       resource_management.core, resource_management.core.source])
+
+def yaml_config(filename, configurations = None, conf_dir = None, owner = None, group = None):
+  config_content = InlineTemplate('''{% for key, value in configurations_dict|dictsort %}{{ key }}: {{ escape_yaml_property(resource_management.core.source.InlineTemplate(value).get_content()) }}
+{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_property, resource_management, resource_management.core, resource_management.core.source])
+
+  File(os.path.join(conf_dir, filename),
+       content = config_content,
+       owner = owner,
+       group = group,
+       mode = 0644
+  )
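To make the storm.yaml template above concrete: keys render in sorted order (dictsort), keys with a leading underscore are skipped, and every value passes through escape_yaml_property so strings YAML would misparse get quoted. A simplified sketch of the rendered output, with invented properties and the escaping left out:

configurations = {
  'storm.zookeeper.port': '2181',
  'nimbus.seeds': "['c6401.ambari.apache.org']",
  '_storm.min.ruid': '500',  # leading underscore: excluded from storm.yaml
}

for key in sorted(configurations):  # mirrors dictsort in the Jinja template
  if key.startswith('_'):
    continue
  print "%s : %s" % (key, configurations[key])

# nimbus.seeds : ['c6401.ambari.apache.org']
# storm.zookeeper.port : 2181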
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py
new file mode 100644
index 0000000..ec3f533
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from storm import storm
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+
+class Supervisor(Script):
+  def get_component_name(self):
+    return "storm-supervisor"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("supervisor")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SupervisorWindows(Supervisor):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    self.configure(env)
+    Service(status_params.supervisor_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.supervisor_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    env.set_params(status_params)
+    check_windows_service_status(status_params.supervisor_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SupervisorDefault(Supervisor):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("supervisor", action="start")
+    service("logviewer", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service("supervisor", action="stop")
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_supervisor)
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_supervisor]
+
+if __name__ == "__main__":
+  Supervisor().execute()
+
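The Windows/default split above via @OsFamilyImpl is the pattern used throughout these scripts: one Script subclass is registered per OS family and the matching one is selected at run time. A rough sketch of the dispatch idea only, not the actual ambari_commons implementation:

_impls = {}

def os_family_impl(os_family):
  # register a class as the implementation for one OS family
  def register(cls):
    _impls[os_family] = cls
    return cls
  return register

def resolve(current_family, default_key='default'):
  # fall back to the default implementation when no exact match exists
  return _impls.get(current_family, _impls.get(default_key))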
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py
new file mode 100644
index 0000000..d6c3545
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisor_prod.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from storm import storm
+from service import service
+from supervisord_service import supervisord_service, supervisord_check_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+
+class Supervisor(Script):
+
+  def get_component_name(self):
+    return "storm-supervisor"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    supervisord_service("supervisor", action="start")
+    service("logviewer", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    supervisord_service("supervisor", action="stop")
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    supervisord_check_status("supervisor")
+    
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+if __name__ == "__main__":
+  Supervisor().execute()
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py
new file mode 100644
index 0000000..6ff9f9c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/supervisord_service.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import Fail, ComponentIsNotRunning
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+
+def supervisord_service(component_name, action):
+  Execute(format("supervisorctl {action} storm-{component_name}"),
+    wait_for_finish=False
+  )
+
+def supervisord_check_status(component_name):
+  try:
+    Execute(format("supervisorctl status storm-{component_name} | grep RUNNING"))
+  except Fail:
+    raise ComponentIsNotRunning()
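For reference, both helpers shell out to supervisorctl, so for the supervisor component they run roughly the commands shown below (the storm-supervisor program name is whatever supervisord was configured with):

supervisord_service("supervisor", action="start")
# -> supervisorctl start storm-supervisor

supervisord_check_status("supervisor")
# -> supervisorctl status storm-supervisor | grep RUNNING
#    raises ComponentIsNotRunning when grep finds no RUNNING line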
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py
new file mode 100644
index 0000000..e257ef9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/ui_server.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import Link
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+from setup_ranger_storm import setup_ranger_storm
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.resources.service import Service
+
+
+class UiServer(Script):
+
+  def get_component_name(self):
+    return "storm-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm("ui")
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class UiServerWindows(UiServer):
+  def start(self, env):
+    import status_params
+    env.set_params(status_params)
+    self.configure(env)
+    Service(status_params.ui_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.ui_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+    check_windows_service_status(status_params.ui_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class UiServerDefault(UiServer):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "storm", params.version)
+      stack_select.select("storm-client", params.version)
+
+  def link_metrics_sink_jar(self):
+    import params
+    # Add storm metrics reporter JAR to storm-ui-server classpath.
+    # Remove any symlinks that may be left over from an upgrade from HDP < 2.2 to HDP >= 2.2
+    Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+         action="delete")
+    # On old HDP 2.1 versions, this symlink may also exist and break Express Upgrade (EU) to newer versions
+    Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar", action="delete")
+
+    if check_stack_feature(StackFeature.STORM_METRICS_APACHE_CLASSES, params.version_for_stack_feature_checks):
+      sink_jar = params.metric_collector_sink_jar
+    else:
+      sink_jar = params.metric_collector_legacy_sink_jar
+
+    Execute(format("{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
+            only_if=format("ls {sink_jar}")
+            )
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    self.link_metrics_sink_jar()
+    setup_ranger_storm(upgrade_type=upgrade_type)
+    service("ui", action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service("ui", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_ui)
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.storm_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.pid_ui]
+
+if __name__ == "__main__":
+  UiServer().execute()
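The not_if/only_if pair in link_metrics_sink_jar above is a recurring guard pattern in these scripts: the shell command runs only when it would actually change something. A minimal pure-Python sketch of the same semantics, with illustrative paths:

import os

def guarded_symlink(sink_jar, link):
  # not_if: the link is already in place -> nothing to do
  if os.path.lexists(link):
    return
  # only_if: the sink jar must actually be installed before linking
  if not os.path.exists(sink_jar):
    return
  os.symlink(sink_jar, link)

guarded_symlink("/usr/lib/storm/lib-ams/ambari-metrics-storm-sink.jar",            # illustrative
                "/usr/hdp/current/storm-client/lib/ambari-metrics-storm-sink.jar")  # illustrative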
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2
new file mode 100644
index 0000000..b061cd1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/client_jaas.conf.j2
@@ -0,0 +1,33 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{nimbus_bare_jaas_principal}}";
+};
+
+{% if kafka_bare_jaas_principal %}
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
new file mode 100644
index 0000000..b2dd3c8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/config.yaml.j2
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nimbusHost: {{nimbus_host}}
+nimbusPort: {{nimbus_port}}
+
+# HTTP-specific options.
+http:
+
+  # The port on which the HTTP server listens for service requests.
+  port: {{rest_api_port}}
+
+  # The port on which the HTTP server listens for administrative requests.
+  adminPort: {{rest_api_admin_port}}
+
+{% if ganglia_installed %}
+enableGanglia: {{ganglia_installed}}
+
+# ganglia configuration (necessary if ganglia reporting is enabled)
+ganglia:
+
+  # how often to report to ganglia metrics (in seconds)
+  reportInterval: {{ganglia_report_interval}}
+
+  # the hostname of the gmond server where storm cluster metrics will be sent
+  host: "{{ganglia_server}}"
+
+  # address mode
+  # default is MULTICAST
+  addressMode: "UNICAST"
+
+  # an <IP>:<HOSTNAME> pair to spoof
+  # this allows us to simulate storm cluster metrics coming from a specific host
+  #spoof: "192.168.1.1:storm"
+{% endif %}
+
+{% if has_metric_collector and stack_supports_storm_ams %}
+enableGanglia: False
+
+ganglia:
+  reportInterval: {{metric_collector_report_interval}}
+
+enableMetricsSink: True
+
+metrics_collector:
+
+  reportInterval: {{metric_collector_report_interval}}
+  collector.hosts: "{{ams_collector_hosts}}"
+  protocol: "{{metric_collector_protocol}}"
+  port: "{{metric_collector_port}}"
+  appId: "{{metric_collector_app_id}}"
+  host_in_memory_aggregation: {{host_in_memory_aggregation}}
+  host_in_memory_aggregation_port: {{host_in_memory_aggregation_port}}
+
+  # HTTPS settings
+  truststore.path : "{{metric_truststore_path}}"
+  truststore.type : "{{metric_truststore_type}}"
+  truststore.password : "{{metric_truststore_password}}"
+
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2
new file mode 100644
index 0000000..a2a4841
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/input.config-storm.json.j2
@@ -0,0 +1,78 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"storm_drpc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/drpc.log"
+    },
+    {
+      "type":"storm_logviewer",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/logviewer.log"
+    },
+    {
+      "type":"storm_nimbus",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/nimbus.log"
+    },
+    {
+      "type":"storm_supervisor",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/supervisor.log"
+    },
+    {
+      "type":"storm_ui",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/ui.log"
+    },
+    {
+      "type":"storm_worker",
+      "rowtype":"service",
+      "path":"{{default('/configurations/storm-env/storm_log_dir', '/var/log/storm')}}/*worker*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "storm_drpc",
+            "storm_logviewer",
+            "storm_nimbus",
+            "storm_supervisor",
+            "storm_ui",
+            "storm_worker"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss.SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
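To make the grok configuration above concrete, here is an invented Storm log line and the fields message_pattern would extract from it:

# 2016-05-03 11:42:17.211 o.a.s.d.nimbus [INFO]  Starting Nimbus with conf ...
#
#   logtime     -> 2016-05-03 11:42:17.211        (TIMESTAMP_ISO8601)
#   logger_name -> o.a.s.d.nimbus                 (JAVACLASS)
#   level       -> INFO                           (LOGLEVEL)
#   log_message -> Starting Nimbus with conf ...  (GREEDYDATA)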
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
new file mode 100644
index 0000000..e7db91e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm-metrics2.properties.j2
@@ -0,0 +1,32 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+collector.hosts={{ams_collector_hosts}}
+protocol={{metric_collector_protocol}}
+port={{metric_collector_port}}
+zookeeper.quorum={{zookeeper_quorum}}
+maxRowCacheSize=10000
+sendInterval={{metrics_report_interval}}000
+clusterReporterAppId=nimbus
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2
new file mode 100644
index 0000000..82a26fe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{storm_user}}   - nofile   {{storm_user_nofile_limit}}
+{{storm_user}}   - nproc    {{storm_user_nproc_limit}}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2
new file mode 100644
index 0000000..c22cb51
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/storm_jaas.conf.j2
@@ -0,0 +1,65 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+{% if stack_supports_storm_kerberos %}
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{nimbus_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{nimbus_jaas_principal}}";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{nimbus_bare_jaas_principal}}"
+   principal="{{storm_jaas_principal}}";
+};
+RegistryClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{storm_jaas_principal}}";
+};
+{% endif %}
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{storm_jaas_principal}}";
+};
+
+{% if kafka_bare_jaas_principal %}
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{storm_jaas_principal}}";
+};
+{% endif %}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2 b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2
new file mode 100644
index 0000000..2228601
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/templates/worker-launcher.cfg.j2
@@ -0,0 +1,19 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+storm.worker-launcher.group={{user_group}}
+min.user.id={{min_user_ruid}}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..d45f337
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/quicklinks/quicklinks.json
@@ -0,0 +1,45 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"ui.https.keystore.path",
+          "desired":"EXIST",
+          "site":"storm-site"
+        },
+        {
+          "property":"ui.https.key.password",
+          "desired":"EXIST",
+          "site":"storm-site"
+        },
+        {
+          "property":"ui.https.port",
+          "desired":"EXIST",
+          "site":"storm-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "storm_ui",
+        "label": "Storm UI",
+        "requires_user_name": "false",
+        "component_name": "STORM_UI_SERVER",
+        "url":"%@://%@:%@/",
+        "port":{
+          "http_property": "ui.port",
+          "http_default_port": "8744",
+          "https_property": "ui.https.port",
+          "https_default_port": "8740",
+          "regex": "^(\\d+)$",
+          "site": "storm-site"
+        }
+      }
+    ]
+  }
+}
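A minimal sketch of how this quicklink resolves: the %@ placeholders in url take protocol, host, and port, and https is chosen only when all three storm-site checks above pass (the host name and helper function here are invented for illustration):

def resolve_quicklink(https_ok, host, storm_site):
  protocol = "https" if https_ok else "http"
  prop, default = ("ui.https.port", "8740") if https_ok else ("ui.port", "8744")
  return "%s://%s:%s/" % (protocol, host, storm_site.get(prop, default))

print resolve_quicklink(False, "c6401.ambari.apache.org", {})
# -> http://c6401.ambari.apache.org:8744/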
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json
new file mode 100644
index 0000000..c8dfd8b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/role_command_order.json
@@ -0,0 +1,13 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for STORM",
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "NAMENODE-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START", "NAMENODE-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+      "DRPC_SERVER-START"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
+  }
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
new file mode 100644
index 0000000..1d6bbe0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
@@ -0,0 +1,387 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class StormServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    the host index where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = StormRecommender()
+    recommender.recommendStormConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP21(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+    recommender.recommendStormConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = StormValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class StormRecommender(service_advisor.ServiceAdvisor):
+  """
+  Storm Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendStormConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
+
+
+  def recommendStormConfigurationsFromHDP21(self, configurations, clusterData, services, hosts):
+    storm_mounts = [
+      ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+    ]
+
+    self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+
+  def recommendStormConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    storm_site = self.getServicesSiteProperties(services, "storm-site")
+    security_enabled = self.isSecurityEnabled(services)
+    if "ranger-env" in services["configurations"] and "ranger-storm-plugin-properties" in services["configurations"] and \
+        "ranger-storm-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putStormRangerPluginProperty = self.putProperty(configurations, "ranger-storm-plugin-properties", services)
+      rangerEnvStormPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-storm-plugin-enabled"]
+      putStormRangerPluginProperty("ranger-storm-plugin-enabled", rangerEnvStormPluginProperty)
+
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    nonRangerClass = 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    rangerServiceVersion=''
+    if 'RANGER' in servicesList:
+      rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+
+    if rangerServiceVersion and rangerServiceVersion == '0.4.0':
+      rangerClass = 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled.lower() == 'yes':
+        putStormSiteProperty('nimbus.authorizer', rangerClass)
+      else:
+        putStormSiteProperty('nimbus.authorizer', nonRangerClass)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+
+  def recommendStormConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    putStormEnvProperty = self.putProperty(configurations, "storm-env", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "storm-site" in services["configurations"]:
+      # atlas
+      notifier_plugin_property = "storm.topology.submission.notifier.plugin.class"
+      if notifier_plugin_property in services["configurations"]["storm-site"]["properties"] and \
+         services["configurations"]["storm-site"]["properties"][notifier_plugin_property] is not None:
+
+        notifier_plugin_value = services["configurations"]["storm-site"]["properties"][notifier_plugin_property]
+      else:
+        notifier_plugin_value = " "
+
+      atlas_is_present = "ATLAS" in servicesList
+      atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
+      atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+      enable_atlas_hook = False
+      enable_external_atlas_for_storm = False
+
+      if 'storm-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.storm' in services['configurations']['storm-atlas-application.properties']['properties']:
+        enable_external_atlas_for_storm = services['configurations']['storm-atlas-application.properties']['properties']['enable.external.atlas.for.storm'].lower() == "true"
+
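+      # Enable the hook when ATLAS is part of the cluster or an external Atlas instance is explicitly configured.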
+      if atlas_is_present:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      elif enable_external_atlas_for_storm:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      else:
+        putStormEnvProperty("storm.atlas.hook", "false")
+
+      if 'storm-env' in configurations and 'storm.atlas.hook' in configurations['storm-env']['properties']:
+        enable_atlas_hook = configurations['storm-env']['properties']['storm.atlas.hook'] == "true"
+      elif 'storm-env' in services['configurations'] and 'storm.atlas.hook' in services['configurations']['storm-env']['properties']:
+        enable_atlas_hook = services['configurations']['storm-env']['properties']['storm.atlas.hook'] == "true"
+
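+      # Keep the comma-separated notifier list consistent: append the Atlas hook when enabling, strip it when disabling.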
+      if enable_atlas_hook and not atlas_hook_is_set:
+        notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
+
+      if not enable_atlas_hook and atlas_hook_is_set:
+        application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
+        notifier_plugin_value = ",".join(application_classes) if application_classes else " "
+
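+      # A whitespace-only value is the "empty" sentinel; delete the property instead of persisting it.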
+      if notifier_plugin_value.strip() != "":
+        putStormStartupProperty(notifier_plugin_property, notifier_plugin_value)
+      else:
+        putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
+        putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
+
+
+  def recommendStormConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    storm_site = self.getServicesSiteProperties(services, "storm-site")
+    storm_env = self.getServicesSiteProperties(services, "storm-env")
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    security_enabled = self.isSecurityEnabled(services)
+
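+    # With Kerberos on, substitute the bare Storm principal into the impersonation ACL; otherwise drop the impersonation settings entirely.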
+    if storm_env and storm_site:
+      if security_enabled:
+        _storm_principal_name = storm_env['storm_principal_name'] if 'storm_principal_name' in storm_env else None
+        storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
+        if 'nimbus.impersonation.acl' in storm_site:
+          storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"]
+          storm_nimbus_impersonation_acl = storm_nimbus_impersonation_acl.replace('{{storm_bare_jaas_principal}}', storm_bare_jaas_principal)
+          putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
+      else:
+        if 'nimbus.impersonation.acl' in storm_site:
+          putStormSiteAttributes('nimbus.impersonation.acl', 'delete', 'true')
+        if 'nimbus.impersonation.authorizer' in storm_site:
+          putStormSiteAttributes('nimbus.impersonation.authorizer', 'delete', 'true')
+
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
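+    # Storm 1.x moved its packages from backtype.storm to org.apache.storm, hence the different authorizer defaults than in HDP 2.2.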
+    storm_authorizer_class = 'org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    ranger_authorizer_class = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled and rangerPluginEnabled.lower() == 'yes':
+        putStormSiteProperty('nimbus.authorizer', ranger_authorizer_class)
+      else:
+        putStormSiteProperty('nimbus.authorizer', storm_authorizer_class)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('storm.cluster.metrics.consumer.register', '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"}]')
+      putStormSiteProperty('topology.metrics.consumer.register',
+                           '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '
+                           '"parallelism.hint": 1, '
+                           '"whitelist": ["kafkaOffset\\\..+/", "__complete-latency", "__process-latency", '
+                           '"__receive\\\.population$", "__sendqueue\\\.population$", "__execute-count", "__emit-count", '
+                           '"__ack-count", "__fail-count", "memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '
+                           '"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')
+    else:
+      putStormSiteProperty('storm.cluster.metrics.consumer.register', 'null')
+      putStormSiteProperty('topology.metrics.consumer.register', 'null')
+
+
+class StormValidator(service_advisor.ServiceAdvisor):
+  """
+  Storm Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(StormValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
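+    # Each tuple maps a config type to the validator invoked when that config is added or changed.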
+    self.validators = [("storm-site", self.validateStormConfigurationsFromHDP206),
+                       ("ranger-storm-plugin-properties", self.validateStormRangerPluginConfigurationsFromHDP22),
+                       ("storm-site", self.validateStormConfigurationsFromHDP25)]
+
+
+  def validateStormConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
+      "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
+
+      validationItems.append({"config-name": 'metrics.reporter.register',
+                              "item": self.getWarnItem(
+                                "Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report metrics to the Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
+
+
+  def validateStormRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-storm-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-storm-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    security_enabled = self.isSecurityEnabled(services)
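+    # Warn when the per-service flag disagrees with ranger-env, or when the plugin is enabled without Kerberos.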
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # ranger-storm-plugin must be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-storm-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-storm-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-storm-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-storm-plugin-properties/ranger-storm-plugin-enabled must correspond to ranger-env/ranger-storm-plugin-enabled")})
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()) and not security_enabled:
+      validationItems.append({"config-name": "ranger-storm-plugin-enabled",
+                              "item": self.getWarnItem(
+                                "Ranger Storm plugin should not be enabled in a non-Kerberos environment.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
+
+
+  def validateStormConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
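+      # 'null' is the sentinel the recommender writes when AMS is absent; warn if it is still set while AMS is installed.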
+      if "storm.cluster.metrics.consumer.register" in properties and \
+          'null' in properties.get("storm.cluster.metrics.consumer.register"):
+
+        validationItems.append({"config-name": 'storm.cluster.metrics.consumer.register',
+                                "item": self.getWarnItem(
+                                  "Should be set to the recommended value to report metrics to the Ambari Metrics service.")})
+
+      if "topology.metrics.consumer.register" in properties and \
+          'null' in properties.get("topology.metrics.consumer.register"):
+
+        validationItems.append({"config-name": 'topology.metrics.consumer.register',
+                                "item": self.getWarnItem(
+                                  "Should be set to the recommended value to report metrics to the Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json
new file mode 100644
index 0000000..d22a1ed
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/widgets.json
@@ -0,0 +1,127 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_storm_dashboard",
+      "display_name": "Standard Storm Dashboard",
+      "section_name": "STORM_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Number of Slots",
+          "description": "Number of Slots",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Used Slots",
+              "metric_path": "metrics/storm/nimbus/usedslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            },
+            {
+              "name": "Free Slots",
+              "metric_path": "metrics/storm/nimbus/freeslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            },
+            {
+              "name": "Total Slots",
+              "metric_path": "metrics/storm/nimbus/totalslots",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Used slots",
+              "value": "${Used Slots}"
+            },
+            {
+              "name": "Free slots",
+              "value": "${Free Slots}"
+            },
+            {
+              "name": "Total slots",
+              "value": "${Total Slots}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of executors",
+          "description": "Number of executors",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Total Executors",
+              "metric_path": "metrics/storm/nimbus/totalexecutors",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total executors",
+              "value": "${Total Executors}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of topologies",
+          "description": "Number of topologies",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Topologies",
+              "metric_path": "metrics/storm/nimbus/topologies",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total topologies",
+              "value": "${Topologies}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Number of tasks",
+          "description": "Number of tasks",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Total Tasks",
+              "metric_path": "metrics/storm/nimbus/totaltasks",
+              "service_name": "STORM",
+              "component_name": "NIMBUS"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total tasks",
+              "value": "${Total Tasks}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
new file mode 100644
index 0000000..1a5dde9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
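+  <!-- These credential-related properties ship without default values; the Kerberos descriptor (kerberos.json) populates them when security is enabled. -->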
+  <property>
+    <name>nimbus.autocredential.plugins.classes</name>
+    <description>
+      Allows users to add token-based authentication for services such as HDFS, HBase, and Hive.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>nimbus.credential.renewers.freq.secs</name>
+    <description>
+      Frequency at which tokens will be renewed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>nimbus.credential.renewers.classes</name>
+    <description>
+      List of classes used for token renewal.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json b/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json
new file mode 100644
index 0000000..643cfd3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/kerberos.json
@@ -0,0 +1,138 @@
+{
+  "services": [
+    {
+      "name": "STORM",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "storm_components",
+          "principal": {
+            "value": "${storm-env/storm_user}${principal_suffix}@${realm}",
+            "type": "user",
+            "configuration": "storm-env/storm_principal_name"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/storm.headless.keytab",
+            "owner": {
+              "name": "${storm-env/storm_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "storm-env/storm_keytab"
+          }
+        },
+        {
+          "name": "/STORM/storm_components",
+          "principal": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+          },
+          "keytab": {
+            "configuration": "storm-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "storm-site": {
+            "nimbus.authorizer": "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer",
+            "drpc.authorizer": "org.apache.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
+            "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
+            "storm.principal.tolocal": "org.apache.storm.security.auth.KerberosPrincipalToLocal",
+            "supervisor.enable": "true",
+            "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
+            "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
+            "nimbus.impersonation.authorizer": "org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer",
+            "nimbus.impersonation.acl": "{ {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}",
+            "nimbus.admins": "['{{storm_bare_jaas_principal}}', '{{ambari_bare_jaas_principal}}']",
+            "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
+            "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}",
+            "nimbus.autocredential.plugins.classes": "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']",
+            "nimbus.credential.renewers.classes": "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']",
+            "nimbus.credential.renewers.freq.secs": 82800
+
+          }
+        },
+        {
+          "ranger-storm-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "STORM_UI_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "storm-env/storm_ui_principal_name"
+              },
+              "keytab": {
+                "configuration": "storm-env/storm_ui_keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NIMBUS",
+          "identities": [
+            {
+              "name": "nimbus_server",
+              "principal": {
+                "value": "nimbus/_HOST@${realm}",
+                "type": "service",
+                "configuration": "storm-env/nimbus_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nimbus.service.keytab",
+                "owner": {
+                  "name": "${storm-env/storm_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "storm-env/nimbus_keytab"
+              }
+            },
+            {
+              "name": "/STORM/storm_components",
+              "principal": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DRPC_SERVER",
+          "identities": [
+            {
+              "name": "drpc_server",
+              "reference": "/STORM/NIMBUS/nimbus_server"
+            }
+          ]
+        },
+        {
+          "name" : "SUPERVISOR"
+        }
+      ]
+    }
+  ]
+}
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml
new file mode 100644
index 0000000..94f5ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>1.1.0</version>
+      <extends>common-services/STORM/1.0.1</extends>
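+      <!-- Inherits the full STORM 1.0.1 service definition; this file only bumps the version and declares its configuration dependencies. -->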
+
+      <configuration-dependencies>
+        <config-type>storm-site</config-type>
+        <config-type>storm-env</config-type>
+        <config-type>ranger-storm-plugin-properties</config-type>
+        <config-type>ranger-storm-audit</config-type>
+        <config-type>ranger-storm-policymgr-ssl</config-type>
+        <config-type>ranger-storm-security</config-type>
+        <config-type>admin-properties</config-type>
+        <config-type>ranger-ugsync-site</config-type>
+        <config-type>ranger-admin-site</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+        <config-type>application.properties</config-type>
+        <config-type>storm-atlas-application.properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-logsearch-conf.xml
deleted file mode 100644
index 3c0abbf..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>MapReduce</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>HISTORYSERVER:mapred_historyserver</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"mapred_historyserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "mapred_historyserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-logsearch-conf.xml
deleted file mode 100644
index 95cf0c9..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-logsearch-conf.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>YARN</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>RESOURCEMANAGER:yarn_resourcemanager,yarn_historyserver,yarn_jobsummary;NODEMANAGER:yarn_nodemanager;APP_TIMELINE_SERVER:yarn_timelineserver</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"yarn_nodemanager",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log"
-    },
-    {
-      "type":"yarn_resourcemanager",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log"
-    },
-    {
-      "type":"yarn_timelineserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log"
-    },
-    {
-      "type":"yarn_historyserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-historyserver-*.log"
-    },
-    {
-      "type":"yarn_jobsummary",
-      "rowtype":"service",
-      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "yarn_historyserver",
-            "yarn_jobsummary",
-            "yarn_nodemanager",
-            "yarn_resourcemanager",
-            "yarn_timelineserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
-}
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
index 03fff21..b1e0c16 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
@@ -83,67 +83,6 @@
     env.set_params(status_params)
     check_process_status(status_params.yarn_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 8f5d380..d886244 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -120,62 +120,6 @@
     env.set_params(status_params)
     check_process_status(status_params.mapred_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.mapred_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index 133d2e1..5acb20b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -90,66 +90,6 @@
     env.set_params(status_params)
     check_process_status(status_params.nodemanager_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index b871b68..81b99e6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -132,66 +132,6 @@
     check_process_status(status_params.resourcemanager_pid_file)
     pass
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def refreshqueues(self, env):
     import params
 
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index 52338df..2e1b208 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -45,6 +45,39 @@
   """
   import params
 
+  if config_dir is None:
+    config_dir = params.hadoop_conf_dir
+
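+  # The NodeManager recovery dir may be a template referencing other config values, so render it before creating the directory.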
+  if params.yarn_nodemanager_recovery_dir:
+    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents=True,
+              mode=0755,
+              cd_access='a',
+    )
+
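+  # Create the PID and log directories for the yarn and mapred users before any per-component setup runs.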
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access='a',
+  )
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access='a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            ignore_failures=True,
+            cd_access='a',
+  )
+
+  # Some of these function calls depend on the directories above being created first.
   if name == 'resourcemanager':
     setup_resourcemanager()
   elif name == 'nodemanager':
@@ -53,40 +86,7 @@
     setup_ats()
   elif name == 'historyserver':
     setup_historyserver()
-
-  if config_dir is None:
-    config_dir = params.hadoop_conf_dir
-
-  if params.yarn_nodemanager_recovery_dir:
-    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-              owner=params.yarn_user,
-              group=params.user_group,
-              create_parents = True,
-              mode=0755,
-              cd_access = 'a',
-    )
-
-  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-
-  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            ignore_failures=True,
-            cd_access = 'a',
-  )
-
+
   XmlConfig("core-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['core-site'],
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-mapreduce2.json.j2 b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-mapreduce2.json.j2
new file mode 100644
index 0000000..8034843
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-mapreduce2.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
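+{# Logfeeder input/filter definition for the MapReduce2 history server log; the grok pattern matches the default log4j ISO8601 layout. #}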
+{
+  "input":[
+    {
+      "type":"mapred_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "mapred_historyserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-yarn.json.j2 b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-yarn.json.j2
new file mode 100644
index 0000000..bf1dd37
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/input.config-yarn.json.j2
@@ -0,0 +1,72 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
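+{# Logfeeder input/filter definitions for the YARN daemon logs; all five log types share one grok filter. #}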
+{
+  "input":[
+    {
+      "type":"yarn_nodemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log"
+    },
+    {
+      "type":"yarn_resourcemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log"
+    },
+    {
+      "type":"yarn_timelineserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log"
+    },
+    {
+      "type":"yarn_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-historyserver-*.log"
+    },
+    {
+      "type":"yarn_jobsummary",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "yarn_historyserver",
+            "yarn_jobsummary",
+            "yarn_nodemanager",
+            "yarn_resourcemanager",
+            "yarn_timelineserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
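
The {{default('/path', fallback)}} calls in these templates resolve a '/'-separated path against the cluster configurations and fall back to the literal when the key is absent. A hypothetical stand-in for that helper (not Ambari's actual implementation), to illustrate the lookup semantics:

    # Walk a '/'-separated path through a nested configuration dict; return
    # the fallback if any segment is missing. Illustration only.
    def default(path, fallback, configurations):
        node = configurations
        for segment in path.strip('/').split('/'):
            if not isinstance(node, dict) or segment not in node:
                return fallback
            node = node[segment]
        return node

    configs = {'configurations': {'yarn-env': {'yarn_user': 'yarn'}}}
    print(default('/configurations/yarn-env/yarn_user', 'custom-yarn', configs))  # 'yarn'
    print(default('/configurations/yarn-env/yarn_log_dir_prefix',
                  '/var/log/hadoop', configs))  # falls back to '/var/log/hadoop'
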
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
index 5fb4732..6a52865 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
@@ -161,131 +161,131 @@
     <display-name>yarn-env template</display-name>
     <description>This is the jinja template for yarn-env.sh file</description>
     <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
 
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This is restrict the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-      export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
+# We need to add the EWMA appender for the yarn daemons only;
+# however, YARN_ROOT_LOGGER is shared by the yarn client and the
+# daemons. This is to restrict the EWMA appender to daemons only.
+INVOKER="${0##*/}"
+if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+fi
 
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
 
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+#echo "run java in $JAVA_HOME"
+JAVA_HOME=$JAVA_HOME
+fi
 
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
+if [ "$JAVA_HOME" = "" ]; then
+echo "Error: JAVA_HOME is not set."
+exit 1
+fi
 
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
 
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
 
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
 
-      # Resource Manager specific parameters
+# Resource Manager specific parameters
 
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
 
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
 
-      # Node Manager specific parameters
+# Node Manager specific parameters
 
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
 
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
 
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
 
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
 
 
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+YARN_LOGFILE='yarn.log'
+fi
 
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+YARN_POLICYFILE="hadoop-policy.xml"
+fi
 
-      # restore ordinary behaviour
-      unset IFS
+# restore ordinary behaviour
+unset IFS
 
 
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-      {% if rm_security_opts is defined %}
-      YARN_OPTS="{{rm_security_opts}} $YARN_OPTS"
-      {% endif %}
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+{% if rm_security_opts is defined %}
+YARN_OPTS="{{rm_security_opts}} $YARN_OPTS"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
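
The only change in this hunk is stripping the six-space indentation from the template body: the <value> content is rendered and written essentially verbatim into yarn-env.sh, so the indentation previously carried over into every line of the generated script. A tiny sketch of the effect, using plain string formatting in place of the real template rendering (the JDK path is a made-up example):

    # Each line of the indented template body starts with six spaces, and those
    # spaces survive into the rendered yarn-env.sh; the flat version does not.
    indented = "      export JAVA_HOME={java64_home}\n      export YARN_PID_DIR={pid}/$USER\n"
    flat = "export JAVA_HOME={java64_home}\nexport YARN_PID_DIR={pid}/$USER\n"

    params = {"java64_home": "/usr/jdk64/jdk1.8.0_112", "pid": "/var/run/hadoop-yarn"}
    print(indented.format(**params))  # every generated line is indented
    print(flat.format(**params))      # clean output after this patch
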
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
index a200e74..dab4516 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
@@ -25,74 +25,74 @@
     <display-name>yarn-log4j template</display-name>
     <description>Custom log4j.properties</description>
     <value>
-      #Relative to Yarn Log Dir Prefix
-      yarn.log.dir=.
-      #
-      # Job Summary Appender
-      #
-      # Use following logger to send summary to separate file defined by
-      # hadoop.mapreduce.jobsummary.log.file rolled daily:
-      # hadoop.mapreduce.jobsummary.logger=INFO,JSA
-      #
-      hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-      hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-      log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-      # Set the ResourceManager summary log filename
-      yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-      # Set the ResourceManager summary log level and appender
-      yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-      #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-      # To enable AppSummaryLogging for the RM,
-      # set yarn.server.resourcemanager.appsummary.logger to
-      # LEVEL,RMSUMMARY in hadoop-env.sh
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
 
-      # Appender for ResourceManager Application Summary Log
-      # Requires the following properties to be set
-      #    - hadoop.log.dir (Hadoop Log directory)
-      #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-      #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-      log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-      log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-      log4j.appender.RMSUMMARY.MaxFileSize=256MB
-      log4j.appender.RMSUMMARY.MaxBackupIndex=20
-      log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-      log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
 
-      # Appender for viewing information for errors and warnings
-      yarn.ewma.cleanupInterval=300
-      yarn.ewma.messageAgeLimitSeconds=86400
-      yarn.ewma.maxUniqueMessages=250
-      log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-      log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-      log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-      log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
-      # Audit logging for ResourceManager
-      rm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-      log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-      log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
 
-      # Audit logging for NodeManager
-      nm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-      log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-      log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 394fae3..64e0bcb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -411,9 +411,10 @@
   </property>
 
   <!-- These configs were inherited from HDP 2.1 -->
+  <!-- TODO, temporarily disable timeline service since failing due to YARN-6534 -->
   <property>
     <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
+    <value>false</value>
     <description>Indicate to clients whether timeline service is enabled or not.
       If enabled, clients will put entities and events to the timeline server.
     </description>
@@ -1033,11 +1034,9 @@
   </property>
 
   <!--ats v2.0 properties-->
-
-  <!-- TODO HDP 3.0, set version to 2.0 once ready. -->
   <property>
     <name>yarn.timeline-service.version</name>
-    <value>1.5</value>
+    <value>2.0</value>
     <description>Timeline service version we&#x2019;re currently using.</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -1177,4 +1176,16 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+    <description>This option controls the pace at which containers-marked-for-preemption are actually preempted in each period.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>1</value>
+    <description>Similar to total_preemption_per_round, we can apply this factor to slow down resource preemption after the preemption-target is computed for each queue.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
 </configuration>
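
Illustrative arithmetic only (not scheduler code): per the two descriptions above, total_preemption_per_round throttles how much can be preempted in one monitoring period, and natural_termination_factor further damps the per-queue target once it is computed. With the defaults added here:

    # Hypothetical numbers showing the per-round throttling effect.
    cluster_capacity_mb = 102400
    preemption_target_mb = 8192  # invented gap between ideal and actual allocation

    round_cap_mb = cluster_capacity_mb * 0.1         # total_preemption_per_round = 0.1
    this_round_mb = min(preemption_target_mb * 1.0,  # natural_termination_factor = 1
                        round_cap_mb)
    print(this_round_mb)  # 8192.0 -- under the 10240.0 MB per-round ceiling
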
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 18186bd..a802795 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -267,8 +267,7 @@
             <enabled>true</enabled>
             <co-locate>YARN/RESOURCEMANAGER</co-locate>
           </auto-deploy>
-
-          <!-- TODO HDP 3.0, add later after UI is fixed,
+
           <dependencies>
             <dependency>
               <name>HDFS/HDFS_CLIENT</name>
@@ -292,7 +291,6 @@
               </auto-deploy>
             </dependency>
           </dependencies>
-          -->
 
           <commandScript>
             <script>scripts/historyserver.py</script>
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
index 03fff21..b1e0c16 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/application_timeline_server.py
@@ -83,67 +83,6 @@
     env.set_params(status_params)
     check_process_status(status_params.yarn_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
index 8f5d380..d886244 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/historyserver.py
@@ -120,62 +120,6 @@
     env.set_params(status_params)
     check_process_status(status_params.mapred_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.mapred_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
index 133d2e1..5acb20b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/nodemanager.py
@@ -90,66 +90,6 @@
     env.set_params(status_params)
     check_process_status(status_params.nodemanager_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index ba748f1..78675bf 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -147,66 +147,6 @@
     check_process_status(status_params.resourcemanager_pid_file)
     pass
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def refreshqueues(self, env):
     import params
 
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
index d601f8f..c6b4c18 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/yarn.py
@@ -45,6 +45,40 @@
   """
   import params
 
+  if config_dir is None:
+    config_dir = params.hadoop_conf_dir
+
+  if params.yarn_nodemanager_recovery_dir:
+    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents=True,
+              mode=0755,
+              cd_access='a',
+    )
+
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access='a',
+  )
+
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access='a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            ignore_failures=True,
+            cd_access='a',
+  )
+
+  # Some of these function calls depend on the directories above being created first.
   if name == 'resourcemanager':
     setup_resourcemanager()
   elif name == 'nodemanager':
@@ -54,39 +88,6 @@
   elif name == 'historyserver':
     setup_historyserver()
 
-  if config_dir is None:
-    config_dir = params.hadoop_conf_dir
-
-  if params.yarn_nodemanager_recovery_dir:
-    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-              owner=params.yarn_user,
-              group=params.user_group,
-              create_parents = True,
-              mode=0755,
-              cd_access = 'a',
-    )
-
-  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-
-  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            ignore_failures=True,
-            cd_access = 'a',
-  )
-
   XmlConfig("core-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['core-site'],
@@ -377,6 +378,7 @@
        owner=params.yarn_user,
        group=params.user_group
   )
+  # This depends on the parent directory already existing.
   File(params.yarn_job_summary_log,
      owner=params.yarn_user,
      group=params.user_group
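
The yarn.py hunks above only move the Directory(...) resources ahead of the setup_*() calls; as the new comment notes, those functions assume the pid/log directories already exist. A minimal sketch of the ordering dependency, with stand-in functions rather than Ambari's resource_management API:

    import os, tempfile

    base = tempfile.mkdtemp()
    log_dir = os.path.join(base, "var", "log", "hadoop-yarn", "yarn")

    def create_dirs():
        # Mirrors the Directory(...) resources moved to the top of the function.
        os.makedirs(log_dir)

    def setup_historyserver():
        # Mirrors a setup_*() call that writes into one of those directories;
        # it would fail with ENOENT if create_dirs() had not run first.
        open(os.path.join(log_dir, "hadoop-mapreduce.jobsummary.log"), "a").close()

    create_dirs()          # the patch moves this step first
    setup_historyserver()  # now safe
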
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index fc32001..3dd39e5 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -420,6 +420,7 @@
 
   def recommendYARNConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
 
     if "yarn-site" in services["configurations"] and \
                     "yarn.resourcemanager.scheduler.monitor.enable" in services["configurations"]["yarn-site"]["properties"]:
@@ -429,6 +430,10 @@
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    # Calculate total_preemption_per_round as 1/<number of hosts>, but never below 0.1, rounded to 2 decimals.
+    total_preemption_per_round = str(round(max(float(1) / len(hosts['items']), 0.1), 2))
+    putYarnSiteProperty('yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round', total_preemption_per_round)
+
     if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
       yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
     else:
@@ -466,6 +471,81 @@
     else:
       self.logger.info("Not setting Yarn Repo user for Ranger.")
 
+    yarn_timeline_app_cache_size = None
+    host_mem = None
+    for host in hosts["items"]:
+      host_mem = host["Hosts"]["total_mem"]
+      break
+    # Check if 'yarn.timeline-service.entity-group-fs-store.app-cache-size' in changed configs.
+    changed_configs_has_ats_cache_size = self.isConfigPropertiesChanged(
+      services, "yarn-site", ['yarn.timeline-service.entity-group-fs-store.app-cache-size'], False)
+    # Either: 1. an app-cache-size change was detected in changed-configurations,
+    # or 2. this is cluster initialization (services['changed-configurations'] is empty in that case).
+    if changed_configs_has_ats_cache_size:
+      yarn_timeline_app_cache_size = self.read_yarn_apptimelineserver_cache_size(services)
+    elif 0 == len(services['changed-configurations']):
+      # Use the host memory read from the 1st host above for the ATS cache-size calculation below.
+      if host_mem is not None:
+        yarn_timeline_app_cache_size = self.calculate_yarn_apptimelineserver_cache_size(host_mem)
+        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.app-cache-size', yarn_timeline_app_cache_size)
+        self.logger.info("Updated YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as : {0}, "
+                         "using 'host_mem' = {1}".format(yarn_timeline_app_cache_size, host_mem))
+      else:
+        self.logger.info("Couldn't update YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as "
+                         "'host_mem' read = {0}".format(host_mem))
+
+    if yarn_timeline_app_cache_size is not None:
+      # Calculation for 'ats_heapsize' is in MB.
+      ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size)
+      putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB
+      self.logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize))
+
+  """
+  Calculate YARN config 'apptimelineserver_heapsize' in MB.
+  """
+  def calculate_yarn_apptimelineserver_heapsize(self, host_mem, yarn_timeline_app_cache_size):
+    ats_heapsize = None
+    if host_mem < 4096:
+      ats_heapsize = 1024
+    else:
+      ats_heapsize = long(min(math.floor(host_mem/2), long(yarn_timeline_app_cache_size) * 500 + 3072))
+    return ats_heapsize
+
+  """
+  Calculates the YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size', based on the host's total memory.
+  """
+  def calculate_yarn_apptimelineserver_cache_size(self, host_mem):
+    yarn_timeline_app_cache_size = None
+    if host_mem < 4096:
+      yarn_timeline_app_cache_size = 3
+    elif host_mem >= 4096 and host_mem < 8192:
+      yarn_timeline_app_cache_size = 7
+    elif host_mem >= 8192:
+      yarn_timeline_app_cache_size = 10
+    self.logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size))
+    return yarn_timeline_app_cache_size
+
+
+  """
+  Reads YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size'.
+  """
+  def read_yarn_apptimelineserver_cache_size(self, services):
+    """
+    :type services dict
+    :rtype str
+    """
+    yarn_ats_app_cache_size = None
+    yarn_ats_app_cache_size_config = "yarn.timeline-service.entity-group-fs-store.app-cache-size"
+    yarn_site_in_services = self.getServicesSiteProperties(services, "yarn-site")
+
+    if yarn_site_in_services and yarn_ats_app_cache_size_config in yarn_site_in_services:
+      yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config]
+      self.logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size))
+
+    if not yarn_ats_app_cache_size:
+      self.logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config))
+
+    return yarn_ats_app_cache_size
 
   #region LLAP
   def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
@@ -829,6 +909,7 @@
                 "{2}".format(llap_mem_daemon_size, llap_mem_for_tezAm_and_daemons, tez_am_memory_required))
 
     llap_daemon_mem_per_node = self._normalizeDown(llap_mem_daemon_size / num_llap_nodes_requested, yarn_min_container_size)
+    # This value takes into account total cluster capacity, and may not have left enough capacity on each node to launch an AM.
     self.logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
                 "yarn_min_container_size: {3}".format(llap_daemon_mem_per_node, llap_mem_daemon_size, num_llap_nodes_requested, yarn_min_container_size))
     if llap_daemon_mem_per_node == 0:
@@ -848,6 +929,31 @@
       num_llap_nodes = num_llap_nodes_requested
       self.logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
 
+    # Make sure we have enough memory on each node to run AMs.
+    # If nodes vs nodes_requested is different - AM memory is already factored in.
+    # If llap_node_count < total_cluster_nodes - assuming AMs can run on a different node.
+    # Else factor in min_concurrency_per_node * tez_am_size, and slider_am_size
+    # Also needs to factor in whether num_llap_nodes = cluster_node_count
+    min_mem_reserved_per_node = 0
+    if num_llap_nodes == num_llap_nodes_requested and num_llap_nodes == node_manager_cnt:
+      tez_AMs_per_node = llap_concurrency / num_llap_nodes
+      tez_AMs_per_node_low = int(math.floor(tez_AMs_per_node))
+      tez_AMs_per_node_high = int(math.ceil(tez_AMs_per_node))
+      min_mem_reserved_per_node = int(max(tez_AMs_per_node_high * normalized_tez_am_container_size, tez_AMs_per_node_low * normalized_tez_am_container_size + slider_am_container_size))
+      self.logger.info("DBG: Determined 'AM reservation per node': {0}, using following : concurrency: {1}, num_llap_nodes: {2}, AMsPerNode: {3}"
+                  .format(min_mem_reserved_per_node, llap_concurrency, num_llap_nodes,  tez_AMs_per_node))
+
+    max_single_node_mem_available_for_daemon = self._normalizeDown(yarn_nm_mem_in_mb_normalized - min_mem_reserved_per_node, yarn_min_container_size)
+    if max_single_node_mem_available_for_daemon <= 0 or max_single_node_mem_available_for_daemon < mem_per_thread_for_llap:
+      self.logger.warning("Not enough capacity available per node for daemons after factoring in AM memory requirements. NM Mem: {0}, "
+                     "minAMMemPerNode: {1}, available: {2}".format(yarn_nm_mem_in_mb_normalized, min_mem_reserved_per_node, max_single_node_mem_available_for_daemon))
+      self.recommendDefaultLlapConfiguration(configurations, services, hosts)
+
+    llap_daemon_mem_per_node = min(max_single_node_mem_available_for_daemon, llap_daemon_mem_per_node)
+    self.logger.info("DBG: Determined final memPerDaemon: {0}, using following: concurrency: {1}, numNMNodes: {2}, numLlapNodes: {3} "
+                .format(llap_daemon_mem_per_node, llap_concurrency, node_manager_cnt, num_llap_nodes))
+
     num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
     if num_executors_per_node_max < 1:
       self.logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
@@ -868,6 +974,8 @@
     # Now figure out how much of the memory will be used by the executors, and how much will be used by the cache.
     total_mem_for_executors_per_node = num_executors_per_node * mem_per_thread_for_llap
     cache_mem_per_node = llap_daemon_mem_per_node - total_mem_for_executors_per_node
+    self.logger.info("DBG: Calculated 'Cache per node' : {0}, using following : llap_daemon_mem_per_node : {1}, total_mem_for_executors_per_node : {2}"
+            .format(cache_mem_per_node, llap_daemon_mem_per_node, total_mem_for_executors_per_node))
 
     tez_runtime_io_sort_mb = (long((0.8 * mem_per_thread_for_llap) / 3))
     tez_runtime_unordered_output_buffer_size = long(0.8 * 0.075 * mem_per_thread_for_llap)
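
To make the new ATS sizing logic concrete, here is the same cache-size and heapsize tiering extracted into a standalone sketch (long() from the Python 2 original swapped for int() so it also runs on Python 3; the sample memory values are arbitrary):

    import math

    def cache_size(host_mem_mb):
        # Mirrors calculate_yarn_apptimelineserver_cache_size above.
        if host_mem_mb < 4096:
            return 3
        elif host_mem_mb < 8192:
            return 7
        return 10

    def ats_heapsize(host_mem_mb, app_cache_size):
        # Mirrors calculate_yarn_apptimelineserver_heapsize above.
        if host_mem_mb < 4096:
            return 1024
        return int(min(math.floor(host_mem_mb / 2.0), int(app_cache_size) * 500 + 3072))

    for mem in (2048, 6144, 16384):
        cs = cache_size(mem)
        print(mem, cs, ats_heapsize(mem, cs))
    # 2048 -> cache 3, heap 1024; 6144 -> cache 7, heap 3072; 16384 -> cache 10, heap 8072
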
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-logsearch-conf.xml
deleted file mode 100644
index 0b8ab7a..0000000
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-logsearch-conf.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Zeppelin</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>ZEPPELIN_MASTER:zeppelin</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"zeppelin",
-      "rowtype":"service",
-      "path":"{{default('/configurations/zeppelin-env/zeppelin_log_dir', '/var/log/zeppelin')}}/zeppelin-zeppelin-*.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "zeppelin"
-          ]
-         }
-       },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\])",
-      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}\\(\\{{"{"}}%{DATA:thread_name}\\{{"}"}}%{SPACE}%{JAVAFILE:file}\\[%{JAVAMETHOD:method}\\]:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index fbf2246..c2f81639 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -295,6 +295,12 @@
           interpreter['properties']['zeppelin.jdbc.auth.type'] = "KERBEROS"
           interpreter['properties']['zeppelin.jdbc.principal'] = params.zeppelin_kerberos_principal
           interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
+          if params.zookeeper_znode_parent \
+              and params.hbase_zookeeper_quorum \
+              and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
+            interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
+                                                       params.hbase_zookeeper_quorum + ':' + \
+                                                       params.zookeeper_znode_parent
         else:
           interpreter['properties']['zeppelin.jdbc.auth.type'] = ""
           interpreter['properties']['zeppelin.jdbc.principal'] = ""
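Note: a quick sketch of the URL this hunk assembles for the Phoenix interpreter; the quorum and znode values below are made up:

```python
# Hypothetical values illustrating the phoenix.url rewrite above.
hbase_zookeeper_quorum = "zk1.example.com,zk2.example.com,zk3.example.com"
zookeeper_znode_parent = "/hbase-secure"

phoenix_url = "jdbc:phoenix:" + hbase_zookeeper_quorum + ':' + zookeeper_znode_parent
print(phoenix_url)
# jdbc:phoenix:zk1.example.com,zk2.example.com,zk3.example.com:/hbase-secure
```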
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/templates/input.config-zeppelin.json.j2 b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/templates/input.config-zeppelin.json.j2
new file mode 100644
index 0000000..2b373d5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/templates/input.config-zeppelin.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"zeppelin",
+      "rowtype":"service",
+      "path":"{{default('/configurations/zeppelin-env/zeppelin_log_dir', '/var/log/zeppelin')}}/zeppelin-zeppelin-*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "zeppelin"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}\\(\\{{"{"}}%{DATA:thread_name}\\{{"}"}}%{SPACE}%{JAVAFILE:file}\\[%{JAVAMETHOD:method}\\]:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
index 76dff64..ff9138e 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
@@ -30,7 +30,7 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
-<property>
+  <property>
     <name>zookeeper_log_number_of_backup_files</name>
     <value>10</value>
     <description>The number of backup files</description>
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-logsearch-conf.xml
deleted file mode 100644
index 325af14..0000000
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-logsearch-conf.xml
+++ /dev/null
@@ -1,76 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>Zookeeper</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>ZOOKEEPER_SERVER:zookeeper</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-     "type":"zookeeper",
-     "rowtype":"service",
-     "path":"{{default('/configurations/zookeeper-env/zk_log_dir', '/var/log/zookeeper')}}/zookeeper*.log"
-    }
-  ],
-  "filter":[
-   {
-      "filter":"grok",
-      "conditions":{
-        "fields":{"type":["zookeeper"]}
-      },
-     "log4j_format":"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n",
-     "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-     "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\@%{INT:line_number}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-     "post_map_values": {
-       "logtime": {
-         "map_date":{
-           "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-         }
-       }
-     }
-    }
-   ]
-}
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
index c2d76be..8d9de9e 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/scripts/zookeeper_server.py
@@ -111,57 +111,6 @@
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.zk_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      # Expect the following files to be available in params.config_dir:
-      #   zookeeper_jaas.conf
-      #   zookeeper_client_jaas.conf
-      try:
-        props_value_check = None
-        props_empty_check = ['Server/keyTab', 'Server/principal']
-        props_read_check = ['Server/keyTab']
-        zk_env_expectations = build_expectations('zookeeper_jaas', props_value_check, props_empty_check,
-                                                 props_read_check)
-
-        zk_expectations = {}
-        zk_expectations.update(zk_env_expectations)
-
-        security_params = get_params_from_filesystem(status_params.config_dir,
-                                                   {'zookeeper_jaas.conf': FILE_TYPE_JAAS_CONF})
-
-        result_issues = validate_security_config_properties(security_params, zk_expectations)
-        if not result_issues:  # If all validations passed successfully
-          # Double check the dict before calling execute
-          if ( 'zookeeper_jaas' not in security_params
-               or 'Server' not in security_params['zookeeper_jaas']
-               or 'keyTab' not in security_params['zookeeper_jaas']['Server']
-               or 'principal' not in security_params['zookeeper_jaas']['Server']):
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.zk_user,
-                                security_params['zookeeper_jaas']['Server']['keyTab'],
-                                security_params['zookeeper_jaas']['Server']['principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      except Exception as e:
-        self.put_structured_out({"securityState": "ERROR"})
-        self.put_structured_out({"securityStateErrorInfo": str(e)})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
       
   def get_log_folder(self):
     import params
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/templates/input.config-zookeeper.json.j2 b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/templates/input.config-zookeeper.json.j2
new file mode 100644
index 0000000..cc20847
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/package/templates/input.config-zookeeper.json.j2
@@ -0,0 +1,46 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"zookeeper",
+      "rowtype":"service",
+      "path":"{{default('/configurations/zookeeper-env/zk_log_dir', '/var/log/zookeeper')}}/zookeeper*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":["zookeeper"]
+        }
+      },
+      "log4j_format":"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\@%{INT:line_number}\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values": {
+        "logtime": {
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
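Note: the grok pattern in this template is easiest to read against a concrete log line. Below is a rough Python-regex approximation (grok macros such as %{DATA} and %{GREEDYDATA} are looser than shown) applied to a sample ZooKeeper entry matching the log4j_format above:

```python
import re

line = "2017-05-01 12:00:00,123 - INFO  [main:QuorumPeer@1234] - Starting quorum peer"

# Approximate translation of the message_pattern above into plain re syntax.
pattern = (r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s*-\s*"
           r"(?P<level>[A-Z]+)\s*\[(?P<thread_name>[^@\]]+)@(?P<line_number>\d+)\]\s*-\s*"
           r"(?P<log_message>.*)$")

print(re.match(pattern, line).groupdict())
# {'logtime': '2017-05-01 12:00:00,123', 'level': 'INFO',
#  'thread_name': 'main:QuorumPeer', 'line_number': '1234',
#  'log_message': 'Starting quorum peer'}
```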
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index 112abe3..33adce1 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -127,7 +127,7 @@
     Logger.info("Will install packages for repository version {0}".format(self.repository_version))
 
     if 0 == len(base_urls):
-      Logger.info("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))
+      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))
 
     try:
       append_to_file = False
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
index 4dec16f..c0f0d41 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
@@ -23,6 +23,7 @@
 import os
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import socket
+import traceback
 
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions.default import default
@@ -79,6 +80,7 @@
     else:
       raise Exception("No config found at %s" % str(config_path))
   except Exception, err:
+    traceback.print_exc()
     Logger.warning(err)
 
 
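Note: the added traceback call follows a common pattern worth spelling out: Logger.warning(err) prints only the exception message, while traceback.print_exc() preserves the full stack for diagnosis. A minimal sketch (the path is hypothetical):

```python
import traceback

try:
    raise IOError("No config found at /tmp/example_config.json")  # hypothetical path
except Exception as err:
    traceback.print_exc()           # full stack trace to stderr
    print("WARNING: %s" % err)      # short message, as Logger.warning does
```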
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index c2545fe..e0f81e6 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -59,8 +59,8 @@
         "ServiceComponentInfo/cluster_name",
         "ServiceComponentInfo/display_name",
         "ServiceComponentInfo/state",
-        "ServiceComponents/display_name",
-        "ServiceComponents/description",
+        "ServiceComponentInfo/display_name",
+        "ServiceComponentInfo/description",
         "ServiceComponentInfo/category",
         "ServiceComponentInfo/total_count",
         "ServiceComponentInfo/started_count",
@@ -69,6 +69,8 @@
         "ServiceComponentInfo/init_count",
         "ServiceComponentInfo/unknown_count",
         "ServiceComponentInfo/recovery_enabled",
+        "ServiceComponentInfo/desired_version",
+        "ServiceComponentInfo/repository_state",
         "params/run_smoke_test",
         "_"
     ],
@@ -135,6 +137,9 @@
         "Requests/queued_task_count",
         "Requests/progress_percent",
         "Requests/abort_reason",
+        "Requests/remove_pending_host_requests",
+        "Requests/pending_host_request_count",
+        "Requests/cluster_host_info",
         "_"
     ],
     "RequestSchedule" : [
@@ -167,6 +172,7 @@
         "Tasks/attempt_cnt",
         "Tasks/custom_command_name",
         "Tasks/command_detail",
+        "Tasks/ops_display_name",
         "_"
     ],
     "User":[
diff --git a/ambari-server/src/main/resources/scripts/check_ambari_permissions.py b/ambari-server/src/main/resources/scripts/check_ambari_permissions.py
index 638f65f..21b8a0a 100644
--- a/ambari-server/src/main/resources/scripts/check_ambari_permissions.py
+++ b/ambari-server/src/main/resources/scripts/check_ambari_permissions.py
@@ -30,23 +30,26 @@
 SECURE_FILE_PERMISSIONS = 700
 
 # List of directories with jar files or path to jar file. If "directory", then we will check all jar files in it and in all subdirectories. If jar "file" then we will check only this file.
-jar_files_to_check = ["/var/lib/ambari-server/", "/usr/lib/ambari-server/"]
+jar_files_to_check = ["/var/lib/ambari-server/", "/usr/lib/ambari-server/", "/var/lib/ambari-agent/"]
 
 # List of directories. For this list we are only checking permissions for directory.
 directories_to_check = ["/etc/ambari-server/conf", "/usr/lib/ambari-server", "/usr/lib/python2.6/site-packages/ambari_server",
-                        "/var/lib/ambari-server"]
+                        "/var/lib/ambari-server", "/usr/lib/ambari-agent", "/usr/lib/python2.6/site-packages/ambari_agent",
+                        "/var/lib/ambari-agent/cache", "/var/lib/ambari-agent/cred", "/var/lib/ambari-agent/data",
+                        "/var/lib/ambari-agent/tools", "/var/lib/ambari-agent/lib", "/etc/ambari-agent/conf"]
 
 # List of directories/files. If "directory", then we will check all files in it and in all subdirectories. If "file" then we will check only this file.
 files_to_check = ["/etc/ambari-server/conf/", "/etc/init/ambari-server.conf", "/etc/init.d/ambari-server",
                   "/usr/lib/ambari-server", "/usr/lib/python2.6/site-packages/ambari_server", "/usr/sbin/ambari_server_main.py",
-                  "/usr/sbin/ambari-server.py", "/var/lib/ambari-server"]
+                  "/usr/sbin/ambari-server.py", "/var/lib/ambari-server", "/usr/lib/ambari-agent",
+                  "/usr/lib/python2.6/site-packages/ambari_agent", "/var/lib/ambari-agent"]
 
 
 # List of secure directories. For this list we are only checking permissions for directory.
-secure_directories_to_check = ["/var/lib/ambari-server/keys"]
+secure_directories_to_check = ["/var/lib/ambari-server/keys", "/var/lib/ambari-agent/keys"]
 
 # List of secure directories/files. If "directory", then we will check all files in it and in all subdirectories. If "file" then we will check only this file.
-secure_files_to_check = ["/var/lib/ambari-server/keys"]
+secure_files_to_check = ["/var/lib/ambari-server/keys", "/var/lib/ambari-agent/keys"]
 
 
 
@@ -146,9 +149,10 @@
 
 def do_work(args):
   print "\n*****Check file, or files in directory for valid permissions (without w for group and other)*****"
+  files_with_wrong_permissions = []
   for path in files_to_check:
     path = os.path.join(args.ambari_root_dir, path.lstrip('/'))
-    files_with_wrong_permissions = check_files_in_directory_or_file_for_permissions(path, "/g=w,o=w")
+    files_with_wrong_permissions = files_with_wrong_permissions + check_files_in_directory_or_file_for_permissions(path, "/g=w,o=w")
 
   if files_with_wrong_permissions:
     print "\nFiles with wrong permissions:"
@@ -186,9 +190,10 @@
 
 
   print "\n*****Check directories for valid permissions (without w for group and other)*****"
+  directories_with_wrong_permissions = []
   for dir_path in directories_to_check:
     dir_path = os.path.join(args.ambari_root_dir, dir_path.lstrip('/'))
-    directories_with_wrong_permissions = check_directory_permissions(dir_path, "/g=w,o=w")
+    directories_with_wrong_permissions = directories_with_wrong_permissions + check_directory_permissions(dir_path, "/g=w,o=w")
 
   if directories_with_wrong_permissions:
     print "\nDirectories with wrong permissions:"
@@ -196,9 +201,10 @@
     update_permissions(directories_with_wrong_permissions, DIRECTORY_PERMISSIONS, "Fix permissions for directories to " + str(DIRECTORY_PERMISSIONS) + " (recommended) ")
 
   print "\n*****Check secure directories for valid permissions (without r+w+x for group and other)*****"
+  secure_directories_with_wrong_permissions = []
   for dir_path in secure_directories_to_check:
     dir_path = os.path.join(args.ambari_root_dir, dir_path.lstrip('/'))
-    secure_directories_with_wrong_permissions = check_directory_permissions(dir_path, "/g=r+w+x,o=r+w+x")
+    secure_directories_with_wrong_permissions = secure_directories_with_wrong_permissions + check_directory_permissions(dir_path, "/g=r+w+x,o=r+w+x")
 
   if secure_directories_with_wrong_permissions:
     print "\nSecure directories with wrong permissions:"
@@ -206,9 +212,10 @@
     update_permissions(secure_directories_with_wrong_permissions, SECURE_DIRECTORY_PERMISSIONS, "Fix permissions for secure directories to " + str(SECURE_DIRECTORY_PERMISSIONS) + " (recommended) ")
 
   print "\n*****Check secure file, or files in directory for valid permissions (without r+w+x for group and other)*****"
+  secure_files_with_wrong_permissions = []
   for path in secure_files_to_check:
     path = os.path.join(args.ambari_root_dir, path.lstrip('/'))
-    secure_files_with_wrong_permissions = check_files_in_directory_or_file_for_permissions(path, "/g=r+w+x,o=r+w+x")
+    secure_files_with_wrong_permissions = secure_files_with_wrong_permissions + check_files_in_directory_or_file_for_permissions(path, "/g=r+w+x,o=r+w+x")
 
   if secure_files_with_wrong_permissions:
     print "\nSecure files with wrong permissions:"
diff --git a/ambari-server/src/main/resources/scripts/relocate_host_components.py b/ambari-server/src/main/resources/scripts/relocate_host_components.py
deleted file mode 100644
index 1b9ad1e..0000000
--- a/ambari-server/src/main/resources/scripts/relocate_host_components.py
+++ /dev/null
@@ -1,489 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import sys
-import os
-import logging
-import tempfile
-import urllib2
-import socket
-import json
-import base64
-import time
-
-AMBARI_HOSTNAME = None
-AMBARI_PORT = 8080
-CLUSTER_NAME = None
-PROTOCOL = "http"
-USERNAME = "admin"
-PASSWORD = "admin"
-DEFAULT_TIMEOUT = 10 # seconds
-START_ON_RELOCATE = False
-
-# Supported Actions
-RELOCATE_ACTION = 'relocate'
-ALLOWED_ACTUAL_STATES_FOR_RELOCATE = [ 'INIT', 'UNKNOWN', 'DISABLED', 'UNINSTALLED' ]
-ALLOWED_HOST_STATUS_FOR_RELOCATE = [ 'HEALTHY' ]
-STATUS_WAIT_TIMEOUT = 120 # seconds
-STATUS_CHECK_INTERVAL = 10 # seconds
-
-# API calls
-GET_CLUSTERS_URI = "/api/v1/clusters/"
-GET_HOST_COMPONENTS_URI = "/api/v1/clusters/{0}/services/{1}/components/{2}" +\
-                          "?fields=host_components"
-GET_HOST_COMPONENT_DESIRED_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
-                                       "/host_components/{2}" +\
-                                       "?fields=HostRoles/desired_state"
-GET_HOST_COMPONENT_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
-                               "/host_components/{2}" +\
-                               "?fields=HostRoles/state"
-GET_HOST_STATE_URL = "/api/v1/clusters/{0}/hosts/{1}?fields=Hosts/host_state"
-HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts/{1}/host_components/{2}"
-ADD_HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts?Hosts/host_name={1}"
-
-logger = logging.getLogger()
-
-
-
-class PreemptiveBasicAuthHandler(urllib2.BaseHandler):
-
-  def __init__(self):
-    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
-    password_mgr.add_password(None, getUrl(''), USERNAME, PASSWORD)
-    self.passwd = password_mgr
-    self.add_password = self.passwd.add_password
-
-  def http_request(self, req):
-    uri = req.get_full_url()
-    user = USERNAME
-    pw = PASSWORD
-    raw = "%s:%s" % (user, pw)
-    auth = 'Basic %s' % base64.b64encode(raw).strip()
-    req.add_unredirected_header('Authorization', auth)
-    return req
-
-
-class AmbariResource:
-
-  def __init__(self, serviceName, componentName):
-    self.serviveName = serviceName
-    self.componentName = componentName
-    self.isInitialized = False
-
-  def initializeResource(self):
-    global CLUSTER_NAME
-    if CLUSTER_NAME is None:
-      CLUSTER_NAME = self.findClusterName()
-
-    if self.serviveName is None:
-      raise Exception('Service name undefined')
-
-    if self.componentName is None:
-      raise Exception('Component name undefined')
-
-    handler = PreemptiveBasicAuthHandler()
-    opener = urllib2.build_opener(handler)
-    # Install opener for all requests
-    urllib2.install_opener(opener)
-    self.urlOpener = opener
-
-    self.old_hostname = self.getHostname()
-
-    self.isInitialized = True
-
-
-  def relocate(self, new_hostname):
-    if not self.isInitialized:
-      raise Exception('Resource not initialized')
-
-    # If old and new hostname are the same exit harmlessly
-    if self.old_hostname == new_hostname:
-      logger.error('New hostname is same as existing host name, %s' % self.old_hostname)
-      sys.exit(2)
-    pass
-
-    try:
-      self.verifyHostComponentStatus(self.old_hostname, new_hostname, self.componentName)
-    except Exception, e:
-      logger.error("Exception caught on verify relocate request.")
-      logger.error(e.message)
-      sys.exit(3)
-
-    # Put host component in Maintenance state
-    self.updateHostComponentStatus(self.old_hostname, self.componentName,
-                                   "Disable", "DISABLED")
-
-    # Delete current host component
-    self.deleteHostComponent(self.old_hostname, self.componentName)
-
-    # Add component on the new host
-    self.addHostComponent(new_hostname, self.componentName)
-
-    # Install host component
-    self.updateHostComponentStatus(new_hostname, self.componentName,
-                                   "Installing", "INSTALLED")
-
-    # Wait on install
-    self.waitOnHostComponentUpdate(new_hostname, self.componentName,
-                                   "INSTALLED")
-
-    if START_ON_RELOCATE:
-      # Start host component
-      self.updateHostComponentStatus(new_hostname, self.componentName,
-                                     "Starting", "STARTED")
-
-      # Wait on start
-      self.waitOnHostComponentUpdate(new_hostname, self.componentName, "STARTED")
-    pass
-  pass
-
-  def waitOnHostComponentUpdate(self, hostname, componentName, status):
-    logger.info("Waiting for host component status to update ...")
-    sleep_itr = 0
-    state = None
-    while sleep_itr < STATUS_WAIT_TIMEOUT:
-      try:
-        state = self.getHostComponentState(hostname, componentName)
-        if status == state:
-          logger.info("Status update successful. status: %s" % state)
-          return
-        pass
-      except Exception, e:
-        logger.error("Caught an exception waiting for status update.. "
-                     "continuing to wait...")
-      pass
-
-      time.sleep(STATUS_CHECK_INTERVAL)
-      sleep_itr += STATUS_CHECK_INTERVAL
-    pass
-    if state and state != status:
-      logger.error("Timed out on wait, status unchanged. status = %s" % state)
-      sys.exit(1)
-    pass
-  pass
-
-  def addHostComponent(self, hostname, componentName):
-    data = '{"host_components":[{"HostRoles":{"component_name":"%s"}}]}' % self.componentName
-    req = urllib2.Request(getUrl(ADD_HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                          hostname)), data)
-
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'POST'
-    try:
-      logger.info("Adding host component: %s" % req.get_full_url())
-      resp = self.urlOpener.open(req)
-      self.logResponse('Add host component response: ', resp)
-    except Exception, e:
-      logger.error('Create host component failed, component: {0}, host: {1}'
-                    .format(componentName, hostname))
-      logger.error(e)
-      raise e
-    pass
-
-  def deleteHostComponent(self, hostname, componentName):
-    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                                hostname, componentName)))
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'DELETE'
-    try:
-      logger.info("Deleting host component: %s" % req.get_full_url())
-      resp = self.urlOpener.open(req)
-      self.logResponse('Delete component response: ', resp)
-    except Exception, e:
-      logger.error('Delete {0} failed.'.format(componentName))
-      logger.error(e)
-      raise e
-    pass
-
-  def updateHostComponentStatus(self, hostname, componentName, contextStr, status):
-    # Update host component
-    data = '{"RequestInfo":{"context":"%s %s"},"Body":{"HostRoles":{"state":"%s"}}}' % (contextStr, self.componentName, status)
-    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                                hostname, componentName)), data)
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'PUT'
-    try:
-      logger.info("%s host component: %s" % (contextStr, req.get_full_url()))
-      resp = self.urlOpener.open(req)
-      self.logResponse('Update host component response: ', resp)
-    except Exception, e:
-      logger.error('Update Status {0} failed.'.format(componentName))
-      logger.error(e)
-      raise e
-    pass
-
-  def verifyHostComponentStatus(self, old_hostname, new_hostname, componentName):
-    # Check desired state of host component is not STOPPED or host is
-    # unreachable
-    actualState = self.getHostComponentState(old_hostname, componentName)
-
-    if actualState not in ALLOWED_ACTUAL_STATES_FOR_RELOCATE:
-      raise Exception('Aborting relocate action since host component '
-                      'state is %s' % actualState)
-
-    hostState = self.getHostSatus(new_hostname)
-    if hostState not in ALLOWED_HOST_STATUS_FOR_RELOCATE:
-      raise Exception('Aborting relocate action since host state is %s' % hostState)
-
-    pass
-
-  def getHostSatus(self, hostname):
-    hostStateUrl = getUrl(GET_HOST_STATE_URL.format(CLUSTER_NAME, hostname))
-
-    logger.info("Requesting host status: %s " % hostStateUrl)
-    urlResponse = self.urlOpener.open(hostStateUrl)
-    state = None
-
-    if urlResponse:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostSatus: %s' % data)
-      if data:
-        try:
-          hostsInfo = data.get('Hosts')
-          if not hostsInfo:
-            raise Exception('Cannot find host state for host: {1}'.format(hostname))
-
-          state = hostsInfo.get('host_state')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host state.")
-      pass
-
-    return state
-
-
-  def getHostComponentState(self, hostname, componentName):
-    hostStatusUrl = getUrl(GET_HOST_COMPONENT_STATE_URI.format(CLUSTER_NAME,
-                                hostname, componentName))
-
-    logger.info("Requesting host component state: %s " % hostStatusUrl)
-    urlResponse = self.urlOpener.open(hostStatusUrl)
-    state = None
-
-    if urlResponse:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostComponentState: %s' % data)
-      if data:
-        try:
-          hostRoles = data.get('HostRoles')
-          if not hostRoles:
-            raise Exception('Cannot find host component state for component: ' +\
-                            '{0}, host: {1}'.format(componentName, hostname))
-
-          state = hostRoles.get('state')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host component desired state.")
-      pass
-
-    return state
-
-
-  # Log response for PUT, POST or DELETE
-  def logResponse(self, text=None, response=None):
-    if response is not None:
-      resp = str(response.getcode())
-      if text is None:
-        text = 'Logging response from server: '
-      if resp is not None:
-        logger.info(text + resp)
-
-  def findClusterName(self):
-    clusterUrl = getUrl(GET_CLUSTERS_URI)
-    clusterName = None
-
-    logger.info("Requesting clusters: " + clusterUrl)
-    urlResponse = self.urlOpener.open(clusterUrl)
-    if urlResponse is not None:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from findClusterName: %s' % data)
-      if data:
-        try:
-          clusters = data.get('items')
-          if len(clusters) > 1:
-            raise Exception('Multiple clusters found. %s' % clusters)
-
-          clusterName = clusters[0].get('Clusters').get('cluster_name')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-      else:
-        logger.error("Unable to retrieve clusters data.")
-      pass
-
-    return clusterName
-
-  def getHostname(self):
-    hostsUrl = getUrl(GET_HOST_COMPONENTS_URI.format(CLUSTER_NAME,
-                  self.serviveName, self.componentName))
-
-    logger.info("Requesting host info: " + hostsUrl)
-    urlResponse = self.urlOpener.open(hostsUrl)
-    hostname = None
-
-    if urlResponse is not None:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostname: %s' % data)
-      if data:
-        try:
-          hostRoles = data.get('host_components')
-          if not hostRoles:
-            raise Exception('Cannot find host component data for service: ' +\
-                            '{0}, component: {1}'.format(self.serviveName, self.componentName))
-          if len(hostRoles) > 1:
-            raise Exception('More than one hosts found with the same role')
-
-          hostname = hostRoles[0].get('HostRoles').get('host_name')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host component data.")
-      pass
-
-    return hostname
-
-
-def getUrl(partial_url):
-  return PROTOCOL + "://" + AMBARI_HOSTNAME + ":" + AMBARI_PORT + partial_url
-
-def get_supported_actions():
-  return [ RELOCATE_ACTION ]
-
-#
-# Main.
-#
-def main():
-  tempDir = tempfile.gettempdir()
-  outputFile = os.path.join(tempDir, "ambari_reinstall_probe.out")
-
-  parser = optparse.OptionParser(usage="usage: %prog [options]")
-  parser.set_description('This python program is a Ambari thin client and '
-                         'supports relocation of ambari host components on '
-                         'Ambari managed clusters.')
-
-  parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
-                  default=False, help="output verbosity.")
-  parser.add_option("-s", "--host", dest="server_hostname",
-                  help="Ambari server host name.")
-  parser.add_option("-p", "--port", dest="server_port",
-                  default="8080" ,help="Ambari server port. [default: 8080]")
-  parser.add_option("-r", "--protocol", dest="protocol", default = "http",
-                  help="Protocol for communicating with Ambari server ("
-                       "http/https) [default: http].")
-  parser.add_option("-c", "--cluster-name", dest="cluster_name",
-                  help="Ambari cluster to operate on.")
-  parser.add_option("-e", "--service-name", dest="service_name",
-                  help="Ambari Service to which the component belongs to.")
-  parser.add_option("-m", "--component-name", dest="component_name",
-                  help="Ambari Service Component to operate on.")
-  parser.add_option("-n", "--new-host", dest="new_hostname",
-                  help="New host to relocate the component to.")
-  parser.add_option("-a", "--action", dest="action", default = "relocate",
-                  help="Script action. [default: relocate]")
-  parser.add_option("-o", "--output-file", dest="outputfile",
-                  default = outputFile, metavar="FILE",
-                  help="Output file. [default: %s]" % outputFile)
-  parser.add_option("-u", "--username", dest="username",
-                  default="admin" ,help="Ambari server admin user. [default: admin]")
-  parser.add_option("-w", "--password", dest="password",
-                  default="admin" ,help="Ambari server admin password.")
-  parser.add_option("-d", "--start-component", dest="start_component",
-                  action="store_false", default=False,
-                  help="Should the script start the component after relocate.")
-
-  (options, args) = parser.parse_args()
-
-  # set verbose
-  if options.verbose:
-    logging.basicConfig(level=logging.DEBUG)
-  else:
-    logging.basicConfig(level=logging.INFO)
-
-  global AMBARI_HOSTNAME
-  AMBARI_HOSTNAME = options.server_hostname
-
-  global AMBARI_PORT
-  AMBARI_PORT = options.server_port
-
-  global CLUSTER_NAME
-  CLUSTER_NAME = options.cluster_name
-
-  global PROTOCOL
-  PROTOCOL = options.protocol
-
-  global USERNAME
-  USERNAME = options.username
-
-  global PASSWORD
-  PASSWORD = options.password
-
-  global START_ON_RELOCATE
-  START_ON_RELOCATE = options.start_component
-
-  global logger
-  logger = logging.getLogger('AmbariProbe')
-  handler = logging.FileHandler(options.outputfile)
-  formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-  handler.setFormatter(formatter)
-  logger.addHandler(handler)
-
-  action = RELOCATE_ACTION
-
-  if options.action is not None:
-    if options.action not in get_supported_actions():
-      logger.error("Unsupported action: " + options.action + ", "
-                  "valid actions: " + str(get_supported_actions()))
-      sys.exit(1)
-    else:
-      action = options.action
-
-  socket.setdefaulttimeout(DEFAULT_TIMEOUT)
-
-  ambariResource = AmbariResource(serviceName=options.service_name,
-                                  componentName=options.component_name)
-  ambariResource.initializeResource()
-
-  if action == RELOCATE_ACTION:
-    if options.new_hostname is not None:
-      ambariResource.relocate(options.new_hostname)
-
-if __name__ == "__main__":
-  try:
-    main()
-  except (KeyboardInterrupt, EOFError):
-    print("\nAborting ... Keyboard Interrupt.")
-    sys.exit(1)
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index f85efb0..19e223c 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -241,4 +241,4 @@
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-script_https_protocol = Script.get_force_https_protocol()
\ No newline at end of file
+script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
index 11bbdd8..a4f3bbb 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/mysql_service.py
@@ -26,7 +26,10 @@
   cmd = format('service {daemon_name} {action}')
 
   if action == 'status':
-    Execute(status_cmd)
+    try:
+      Execute(status_cmd)
+    except Exception:
+      raise ComponentIsNotRunning()
   elif action == 'stop':
     Execute(cmd,
             logoutput = True,
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
index cc7b4cc..41fe107 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
@@ -26,7 +26,10 @@
   cmd = format('service {postgresql_daemon_name} {action}')
 
   if action == 'status':
-    Execute(status_cmd)
+    try:
+      Execute(status_cmd)
+    except Exception:
+      raise ComponentIsNotRunning()
   elif action == 'stop':
     Execute(cmd,
             logoutput = True,
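Note: both database-service hunks apply the same idea: a non-zero exit from the status command should read as "component stopped", not as a scripting failure. A self-contained sketch, with a stand-in class for Ambari's ComponentIsNotRunning exception:

```python
import subprocess

class ComponentIsNotRunning(Exception):
    """Stand-in for resource_management.core.exceptions.ComponentIsNotRunning."""

def status(daemon_name):
    status_cmd = "service %s status" % daemon_name
    try:
        subprocess.check_call(status_cmd, shell=True)
    except Exception:
        # Any failure of the status command means "not running", which the
        # agent reports as a stopped component rather than as an error.
        raise ComponentIsNotRunning()
```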
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 1782298..9abd2fe 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -27,6 +27,7 @@
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.version import format_stack_version
+from string import lower
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -44,6 +45,19 @@
 # current host stack version
 current_version = default("/hostLevelParams/current_version", None)
 
+# service name
+service_name = config['serviceName']
+
+# logsearch configuration
+logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
+
+agent_cache_dir = config['hostLevelParams']['agentCacheDir']
+service_package_folder = config['commandParams']['service_package_folder']
+logsearch_service_name = service_name.lower().replace("_", "-")
+logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
+logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
+logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
+
 # default hadoop params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
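Note: these new params combine into the template path consumed below in shared_initialization.py. A worked example with plausible values (the real ones come from the command JSON), matching the input.config-zookeeper.json.j2 template added earlier in this diff:

```python
# Hypothetical command-JSON values.
service_name = "ZOOKEEPER"
agent_cache_dir = "/var/lib/ambari-agent/cache"
service_package_folder = "common-services/ZOOKEEPER/3.4.5/package"

logsearch_service_name = service_name.lower().replace("_", "-")        # "zookeeper"
logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
logsearch_config_file_path = (agent_cache_dir + "/" + service_package_folder +
                              "/templates/" + logsearch_config_file_name + ".j2")
print(logsearch_config_file_path)
# /var/lib/ambari-agent/cache/common-services/ZOOKEEPER/3.4.5/package/templates/input.config-zookeeper.json.j2
```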
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
index e9f2283..36a202f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -19,9 +19,13 @@
 import os
 
 import ambari_simplejson as json
+from ambari_jinja2 import Environment as JinjaEnvironment
 from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory, File
+from resource_management.core.source import InlineTemplate, Template
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
@@ -72,6 +76,19 @@
               group=params.user_group,
               only_if=format("ls {hadoop_conf_dir}"))
 
+  Directory(params.logsearch_logfeeder_conf,
+            mode=0755,
+            cd_access='a',
+            create_parents=True
+            )
+
+  if params.logsearch_config_file_exists:
+    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
+         content=Template(params.logsearch_config_file_path, extra_imports=[default])
+         )
+  else:
+    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
+
 
 def load_version(struct_out_file):
   """
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 4d0de7f..1f17cd1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -109,8 +109,8 @@
   if len(parts) == 1:
     parts.append("")
 
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
 
   # skip creating groups and users if * is provided as value.
   users_list = filter(lambda x: x != '*' , users_list)
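Note: the added .strip(",") guards against leading or trailing commas in the user/group spec, which would otherwise produce an empty name in the list. For example:

```python
raw = "hdfs,yarn,"
print(raw.split(","))              # ['hdfs', 'yarn', '']  <- empty user name
print(raw.strip(",").split(","))   # ['hdfs', 'yarn']
```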
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 70ebfeb..3488e75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -111,7 +111,14 @@
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+cluster_name = config["clusterName"]
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -137,8 +144,8 @@
 metric_collector_port = None
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -157,6 +164,9 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if has_zk_host:
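Note: the renamed properties follow one rule: an explicitly configured external collector wins over the hosts discovered from the cluster topology. A sketch with plain dicts standing in for Ambari's config objects; host names are hypothetical:

```python
config = {"configurations": {"cluster-env": {
    "metrics_collector_external_hosts": "ams-ext.example.com"  # hypothetical
}}}
cluster_host_info = {"metrics_collector_hosts": ["c6401.ambari.apache.org"]}

cluster_env = config["configurations"].get("cluster-env", {})
if "metrics_collector_external_hosts" in cluster_env:
    ams_collector_hosts = cluster_env["metrics_collector_external_hosts"]
    set_instanceId = "true"   # mirrors the hunk above
else:
    ams_collector_hosts = ",".join(cluster_host_info["metrics_collector_hosts"])
    set_instanceId = "false"

print("%s %s" % (ams_collector_hosts, set_instanceId))  # ams-ext.example.com true
```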
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 2f3aab6..2cd9aa8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -75,6 +75,8 @@
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 34f3c07..78f7b9f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -397,6 +397,27 @@
       "name": "ranger_kms_ssl",
       "description": "Ranger KMS SSL properties in ambari stack",
       "min_version": "2.6.0.0"
+    },
+    {
+      "name": "nifi_encrypt_config",
+      "description": "Encrypt sensitive properties written to nifi property file",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "toolkit_config_update",
+      "description": "Support separate input and output for toolkit configuration",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "admin_toolkit_support",
+      "description": "Supports the nifi admin toolkit",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "tls_toolkit_san",
+      "description": "Support subject alternative name flag",
+      "min_version": "2.6.0.0"
     }
+
   ]
 }
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 866d4cb..ddd795f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -210,12 +210,13 @@
             webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
             webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
 
-    if self.is_secured_cluster(services):
-      putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    if "HDFS" in servicesList or "YARN" in servicesList:
+      if self.is_secured_cluster(services):
+        putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
 
-      meta = self.get_service_component_meta("HIVE", "WEBHCAT_SERVER", services)
-      if "hostnames" in meta:
-        self.put_proxyuser_value("HTTP", meta["hostnames"], services=services, configurations=configurations, put_function=putCoreSiteProperty)
+        meta = self.get_service_component_meta("HIVE", "WEBHCAT_SERVER", services)
+        if "hostnames" in meta:
+          self.put_proxyuser_value("HTTP", meta["hostnames"], services=services, configurations=configurations, put_function=putCoreSiteProperty)
 
   def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
     putTezProperty = self.putProperty(configurations, "tez-site")
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
index 0eb3366..d0b4bb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
@@ -193,7 +193,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
@@ -205,7 +205,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
@@ -217,7 +217,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
@@ -229,7 +229,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 4d672d2..726514b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -422,36 +422,37 @@
 
     container_size = "512"
 
-    if not "yarn-site" in configurations:
-      self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
-    #properties below should be always present as they are provided in HDP206 stack advisor at least
-    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
-    #duplicate tez task resource calc logic, direct dependency doesn't look good here (in case of Hive without Tez)
-    container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
-    container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
+    if "YARN" in servicesList:
+      if not "yarn-site" in configurations:
+        self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+      #properties below should be always present as they are provided in HDP206 stack advisor at least
+      yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+      #duplicate tez task resource calc logic, direct dependency doesn't look good here (in case of Hive without Tez)
+      container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+      container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
 
-    putHiveSiteProperty("hive.tez.container.size", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), container_size))
+      putHiveSiteProperty("hive.tez.container.size", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), container_size))
 
-    putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
-    putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+      putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+      putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
 
-    if "yarn-site" in services["configurations"]:
-      if "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
-        putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
-      if "yarn.scheduler.maximum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
-        putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+      if "yarn-site" in services["configurations"]:
+        if "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+          putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+        if "yarn.scheduler.maximum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+          putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
 
-    putHiveSiteProperty("hive.prewarm.enabled", "false")
-    putHiveSiteProperty("hive.prewarm.numcontainers", "3")
-    putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
-    putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
+      putHiveSiteProperty("hive.prewarm.enabled", "false")
+      putHiveSiteProperty("hive.prewarm.numcontainers", "3")
+      putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
+      putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
 
-    container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
-    container_size_bytes = int(int(container_size)*0.8*1024*1024) # Xmx == 80% of container
-    # Memory
-    putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(round(container_size_bytes/3)))
-    putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)
-    putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
+      container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
+      container_size_bytes = int(int(container_size)*0.8*1024*1024) # Xmx == 80% of container
+      # Memory
+      putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(round(container_size_bytes/3)))
+      putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)
+      putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
 
     # CBO
     if "hive-site" in services["configurations"] and "hive.cbo.enable" in services["configurations"]["hive-site"]["properties"]:
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
index 4f279c6..5b1b50e 100755
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
@@ -4,8 +4,8 @@
   "general_deps" : {
     "_comment" : "dependencies for all cases",
     "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START", "RANGER_KMS_SERVER-START"],
-    "RANGER_KMS_SERVER-START" : ["RANGER_ADMIN-START"],
+    "RANGER_USERSYNC-START" : ["RANGER_ADMIN-START"],
+    "RANGER_KMS_SERVER-START" : ["RANGER_ADMIN-START", "NAMENODE-START"],
     "RANGER_KMS_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_KMS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
     "SPARK_THRIFTSERVER-START" : ["NAMENODE-START", "HIVE_METASTORE-START"],
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 67532c5..9efcee0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -232,9 +232,16 @@
     hive_hooks = [x for x in hive_hooks if x != ""]
     is_atlas_present_in_cluster = "ATLAS" in servicesList
 
+    enable_external_atlas_for_hive = False
     enable_atlas_hook = False
+
+    if 'hive-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.hive' in services['configurations']['hive-atlas-application.properties']['properties']:
+      enable_external_atlas_for_hive = services['configurations']['hive-atlas-application.properties']['properties']['enable.external.atlas.for.hive'].lower() == "true"
+
     if is_atlas_present_in_cluster:
       putHiveEnvProperty("hive.atlas.hook", "true")
+    elif enable_external_atlas_for_hive:
+      putHiveEnvProperty("hive.atlas.hook", "true")
     else:
       putHiveEnvProperty("hive.atlas.hook", "false")
 
@@ -349,10 +356,10 @@
       ranger_kafka_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
       putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", ranger_kafka_plugin_enabled)
 
-    # Determine if the Ranger/Kafka Plugin is enabled
-    ranger_plugin_enabled = "RANGER" in servicesList
+
+    ranger_plugin_enabled = False
     # Only if the RANGER service is installed....
-    if ranger_plugin_enabled:
+    if "RANGER" in servicesList:
       # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled,
       # determine if the Ranger/Kafka plug-in enabled enabled or not
       if 'ranger-kafka-plugin-properties' in configurations and \
@@ -814,10 +821,16 @@
     putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
     putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
 
+    enable_external_atlas_for_sqoop = False
     enable_atlas_hook = False
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'sqoop-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.sqoop' in services['configurations']['sqoop-atlas-application.properties']['properties']:
+      enable_external_atlas_for_sqoop = services['configurations']['sqoop-atlas-application.properties']['properties']['enable.external.atlas.for.sqoop'].lower() == "true"
+
     if "ATLAS" in servicesList:
       putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    elif enable_external_atlas_for_sqoop:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
     else:
       putSqoopEnvProperty("sqoop.atlas.hook", "false")
 
@@ -852,9 +865,15 @@
       atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
       atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
       enable_atlas_hook = False
+      enable_external_atlas_for_storm = False
+
+      if 'storm-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.storm' in services['configurations']['storm-atlas-application.properties']['properties']:
+        enable_external_atlas_for_storm = services['configurations']['storm-atlas-application.properties']['properties']['enable.external.atlas.for.storm'].lower() == "true"
 
       if atlas_is_present:
         putStormEnvProperty("storm.atlas.hook", "true")
+      elif enable_external_atlas_for_storm:
+        putStormEnvProperty("storm.atlas.hook", "true")
       else:
         putStormEnvProperty("storm.atlas.hook", "false")
 
@@ -881,9 +900,14 @@
     putFalconEnvProperty = self.putProperty(configurations, "falcon-env", services)
     enable_atlas_hook = False
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    enable_external_atlas_for_falcon = False
+    if 'falcon-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.falcon' in services['configurations']['falcon-atlas-application.properties']['properties']:
+      enable_external_atlas_for_falcon = services['configurations']['falcon-atlas-application.properties']['properties']['enable.external.atlas.for.falcon'].lower() == "true"
 
     if "ATLAS" in servicesList:
       putFalconEnvProperty("falcon.atlas.hook", "true")
+    elif enable_external_atlas_for_falcon:
+      putFalconEnvProperty("falcon.atlas.hook", "true")
     else:
       putFalconEnvProperty("falcon.atlas.hook", "false")
 
@@ -894,7 +918,7 @@
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations},
-      "KAKFA": {"kafka-broker": self.validateKAFKAConfigurations},
+      "KAFKA": {"kafka-broker": self.validateKAFKAConfigurations},
       "RANGER": {"admin-properties": self.validateRangerAdminConfigurations,
                  "ranger-env": self.validateRangerConfigurationsEnv}
     }
@@ -1064,7 +1088,7 @@
     kafka_broker = properties
     validationItems = []
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
- 
+
     #Adding Ranger Plugin logic here
     ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-kafka-plugin-properties")
     ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
@@ -1118,4 +1142,3 @@
     validationProblems = self.toConfigurationValidationProblems(validationItems, "ranger-env")
     validationProblems.extend(parentValidationProblems)
     return validationProblems
-
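[editor's note] The same three-branch pattern is added above for Hive, Sqoop, Storm, and Falcon: the Atlas hook is turned on either when ATLAS is a cluster service or when an enable.external.atlas.for.<service> property in <service>-atlas-application.properties opts into an Atlas server running outside the cluster. A sketch of the repeated decision as one helper — Ambari keeps these checks inline per service, so this consolidation is illustrative only:

    def atlas_hook_enabled(service, services, services_list):
        """True if ATLAS is installed in the cluster, or the per-service
        external-Atlas flag (enable.external.atlas.for.<service>) is "true"."""
        if "ATLAS" in services_list:
            return True
        site = "%s-atlas-application.properties" % service
        key = "enable.external.atlas.for.%s" % service
        props = services["configurations"].get(site, {}).get("properties", {})
        return props.get(key, "").lower() == "true"

    # e.g. putHiveEnvProperty("hive.atlas.hook",
    #          "true" if atlas_hook_enabled("hive", services, servicesList) else "false")
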
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 63cf5f5..8b5c07d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -386,6 +386,13 @@
             <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
             <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
+
         </changes>
       </component>
     </service>
@@ -635,22 +642,26 @@
             <set key="storm_wrkr_out_maxbackupindex" value="4"/>
             <set key="storm_wrkr_err_maxfilesize" value="100"/>
             <set key="storm_wrkr_err_maxbackupindex" value="4"/>
-            <regex-replace key="content" find="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
-                           replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
-            <regex-replace key="content" find="A1&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="A1&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
           </definition>
           <definition xsi:type="configure" id="storm_cluster_log4j_parameterize" summary="Parameterizing Storm Cluster Log4J Properties">
             <type>storm-cluster-log4j</type>
             <set key="storm_a1_maxfilesize" value="100"/>
             <set key="storm_a1_maxbackupindex" value="9"/>
-            <regex-replace key="content" find="A1&quot; immediateFlush=&quot;false&quot;&#xA;                 fileName=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}&quot;&#xA;                 filePattern=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="A1&quot; immediateFlush=&quot;false&quot;&#xA;                 fileName=&quot;${sys:storm.log.dir}/${sys:logfile.name}&quot;&#xA;                 filePattern=&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_a1_maxbackupindex}}"/>
+            <regex-replace key="content" find="A1&quot;&#xA;                 fileName=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}&quot;&#xA;                 filePattern=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="A1&quot;&#xA;                 fileName=&quot;${sys:storm.log.dir}/${sys:logfile.name}&quot;&#xA;                 filePattern=&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_a1_maxbackupindex}}"/>
           </definition>
+          <definition xsi:type="configure" id="storm_worker_log4j_directory" summary="Update Storm log directory">
+            <type>storm-worker-log4j</type>
+            <replace key="content" find="${sys:storm.log.dir}/${sys:logfile.name}"
+                     replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
+          </definition>
+
         </changes>
       </component>
     </service>
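[editor's note] Two things happen in this config-upgrade.xml: the new hdfs_namenode_prevent_gc_heuristics definition pins CMS to start collecting at 70% old-gen occupancy (-XX:CMSInitiatingOccupancyFraction=70 with -XX:+UseCMSInitiatingOccupancyOnly disables the JVM's adaptive heuristics), and the Storm log4j rewrites split the directory move into its own storm_worker_log4j_directory definition while anchoring the regex-replace patterns on shorter suffixes. Both operate on the "content" key of a config type; <replace> is a literal substring swap, <regex-replace> a pattern match. A minimal sketch of what a <replace key="content" .../> does to a property value (the function is illustrative, not Ambari's config-upgrade machinery):

    def apply_replace(config, key, find, replace_with):
        """Literal find/replace on one property value, as <replace> specifies."""
        if find in config.get(key, ""):
            config[key] = config[key].replace(find, replace_with)
        return config

    hadoop_env = {"content": "-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}"}
    apply_replace(
        hadoop_env, "content",
        "-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}",
        "-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 "
        "-XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}")
    print(hadoop_env["content"])
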
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index 3d8041e..ff1d87d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -253,6 +253,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -292,6 +297,8 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
+
+
     </group>
 
     <!--
@@ -334,16 +341,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -382,6 +379,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -748,30 +755,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -816,6 +799,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
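[editor's note] In this pack (and identically in the nonrolling 2.4, 2.5, and 2.6 packs below), the RANGER_KMS restart group and its service section are moved from before HDFS to after it, matching the new role ordering above: RANGER_KMS_SERVER-START now depends on NAMENODE-START, so the NameNode must be restarted before the KMS server comes back. A hedged helper for checking that ordering in an upgrade pack — illustrative, not part of Ambari:

    import xml.etree.ElementTree as ET

    def restart_group_names(upgrade_xml_path):
        """Group names in document order."""
        tree = ET.parse(upgrade_xml_path)
        return [g.get("name") for g in tree.getroot().iter("group")]

    def kms_restarts_after_hdfs(upgrade_xml_path):
        names = restart_group_names(upgrade_xml_path)
        # Raises ValueError if either group is missing, which is itself a finding.
        return names.index("RANGER_KMS") > names.index("HDFS")
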
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index fa06291..41cab3f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -264,6 +264,11 @@
         </task>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -479,16 +484,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -527,6 +522,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -895,30 +900,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -963,6 +944,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index 8b8b9a6..76c4ed5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -285,6 +285,11 @@
         </task>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -563,6 +568,11 @@
           <summary>Updating the Storm cluster Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
+      <execute-stage service="STORM" component="NIMBUS" title="Update Storm log directory">
+        <task xsi:type="configure" id="storm_worker_log4j_directory">
+          <summary>Update Storm log directory in storm worker log4j</summary>
+        </task>
+      </execute-stage>
 
       <!--ATLAS-->
       <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
@@ -630,16 +640,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -678,6 +678,15 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
 
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
@@ -1037,30 +1046,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -1105,6 +1090,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 2bf6e23..5aa08c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -297,6 +297,11 @@
         <task xsi:type="configure" id="hdfs_securitylogger_additivity"/>
       </execute-stage>
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -661,6 +666,11 @@
           <summary>Updating the Storm cluster Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
+      <execute-stage service="STORM" component="NIMBUS" title="Update Storm log directory">
+        <task xsi:type="configure" id="storm_worker_log4j_directory">
+          <summary>Update Storm log directory in storm worker log4j</summary>
+        </task>
+      </execute-stage>
 
       <!--ATLAS-->
       <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
@@ -728,16 +738,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -776,6 +776,15 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
 
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
@@ -1135,30 +1144,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -1203,6 +1188,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 1340b22..19031a8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -547,8 +547,14 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/>
+
         <upgrade>
-          <task xsi:type="restart-task"/>
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
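[editor's note] Besides running the GC-heuristics configure task pre-upgrade, the NameNode restart task now carries timeout-config="upgrade.parameter.nn-restart.timeout", which (as I read it) lets operators raise the restart timeout through a server-side property instead of the stock command timeout — useful when a large NameNode takes long to leave safe mode. A hedged sketch of resolving such a reference; the default of 1800 seconds is an assumption, not Ambari's actual value:

    def resolve_timeout(ambari_properties, timeout_config, default_seconds=1800):
        """Look up a timeout-config property, falling back to a default."""
        value = ambari_properties.get(timeout_config)
        try:
            return int(value)
        except (TypeError, ValueError):
            return default_seconds

    print(resolve_timeout({"upgrade.parameter.nn-restart.timeout": "7200"},
                          "upgrade.parameter.nn-restart.timeout"))  # 7200
    print(resolve_timeout({}, "upgrade.parameter.nn-restart.timeout"))  # 1800
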
 
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 40afc4f..8cfaa52 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -130,7 +130,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
@@ -574,12 +574,13 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index e0882d8..f7192ae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -158,7 +158,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
@@ -673,12 +673,13 @@
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -1091,6 +1092,7 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
           <task xsi:type="configure" id="storm_worker_log4j_parameterize" />
           <task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+          <task xsi:type="configure" id="storm_worker_log4j_directory" />
         </pre-upgrade>
 
         <pre-downgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 0f4efdc..d98bb53 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -160,7 +160,7 @@
       </service>
     </group>
 
-    <group name="HBASE" title="Update HBase Configuration">
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
       <skippable>true</skippable>
 
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
@@ -682,12 +682,13 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -1127,6 +1128,7 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
           <task xsi:type="configure" id="storm_worker_log4j_parameterize" />
           <task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+          <task xsi:type="configure" id="storm_worker_log4j_directory" />
         </pre-upgrade>
 
         <pre-downgrade>
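[editor's note] The 2.4, 2.5, and 2.6 rolling packs above all add xsi:type="cluster" to the HBASE "Update HBase Configuration" group. Upgrade-pack groups are deserialized by their xsi:type, and a group built from execute-stage entries presumably needs the cluster type to be handled correctly; the bare form was an omission. A small lint sketch for catching such groups — an assumption about the schema, not Ambari code:

    import xml.etree.ElementTree as ET

    # Namespaced attribute name ElementTree uses for xsi:type.
    XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"

    def untyped_groups(upgrade_xml_path):
        """Names of <group> elements missing an xsi:type declaration."""
        tree = ET.parse(upgrade_xml_path)
        return [g.get("name") for g in tree.getroot().iter("group")
                if g.get(XSI_TYPE) is None]
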
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 2345d08..b3d19d4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -271,6 +271,12 @@
             <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
             <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -458,21 +464,24 @@
             <set key="storm_wrkr_out_maxbackupindex" value="4"/>
             <set key="storm_wrkr_err_maxfilesize" value="100"/>
             <set key="storm_wrkr_err_maxbackupindex" value="4"/>
-            <regex-replace key="content" find="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
-                           replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
-            <regex-replace key="content" find="A1&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="A1&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
-          </definition>
+            <regex-replace key="content" find="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
+           </definition>
           <definition xsi:type="configure" id="storm_cluster_log4j_parameterize" summary="Parameterizing Storm Cluster Log4J Properties">
             <type>storm-cluster-log4j</type>
             <set key="storm_a1_maxfilesize" value="100"/>
             <set key="storm_a1_maxbackupindex" value="9"/>
-            <regex-replace key="content" find="A1&quot; immediateFlush=&quot;false&quot;&#xA;                 fileName=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}&quot;&#xA;                 filePattern=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                           replace-with="A1&quot; immediateFlush=&quot;false&quot;&#xA;                 fileName=&quot;${sys:storm.log.dir}/${sys:logfile.name}&quot;&#xA;                 filePattern=&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_a1_maxbackupindex}}"/>
+            <regex-replace key="content" find="A1&quot;&#xA;                 fileName=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}&quot;&#xA;                 filePattern=&quot;\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                           replace-with="A1&quot;&#xA;                 fileName=&quot;${sys:storm.log.dir}/${sys:logfile.name}&quot;&#xA;                 filePattern=&quot;${sys:storm.log.dir}/${sys:logfile.name}.%i&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_a1_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="storm_worker_log4j_directory" summary="Update Storm log directory">
+            <type>storm-worker-log4j</type>
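+            <!-- Moves worker log output from ${sys:storm.log.dir}/${sys:logfile.name} into the
+                 per-topology workers-artifacts tree (${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}). -->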
+            <replace key="content" find="${sys:storm.log.dir}/${sys:logfile.name}"
+                     replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
           </definition>
         </changes>
       </component>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
index 13f44fb..4e66b7c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml
@@ -251,6 +251,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -325,16 +330,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -373,6 +368,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -731,30 +736,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -799,6 +780,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index ea261b9..9969a59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -379,6 +379,10 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- SQOOP -->
       <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
         <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
@@ -491,6 +495,11 @@
           <summary>Updating the Storm cluster Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
+      <execute-stage service="STORM" component="NIMBUS" title="Update Storm log directory">
+        <task xsi:type="configure" id="storm_worker_log4j_directory">
+          <summary>Update the Storm log directory in the Storm worker Log4J configuration</summary>
+        </task>
+      </execute-stage>
 
 
       <!-- KAFKA -->
@@ -592,16 +601,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -640,6 +639,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -998,30 +1007,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -1066,6 +1051,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index de6b8ef..4a2a502 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -435,20 +435,22 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
-      <!--HDFS-->
       <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
         <task xsi:type="configure" id="hdfs_log4j_parameterize">
           <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
 
-      <!--HDFS-->
       <execute-stage service="HDFS" component="NAMENODE" title="Adding HDFS ZKFC Security ACLs">
         <task xsi:type="configure" id="hadoop_env_zkfc_security_opts">
           <summary>Adding HDFS ZKFC Security ACLs</summary>
         </task>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--SPARK-->
       <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
         <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
@@ -584,6 +586,11 @@
           <summary>Updating the Storm cluster Log4J properties to include parameterizations</summary>
         </task>
       </execute-stage>
+      <execute-stage service="STORM" component="NIMBUS" title="Update Storm log directory">
+        <task xsi:type="configure" id="storm_worker_log4j_directory">
+          <summary>Update the Storm log directory in the Storm worker Log4J configuration</summary>
+        </task>
+      </execute-stage>
 
       <!-- KAFKA -->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka Broker">
@@ -687,16 +694,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -735,6 +732,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -1093,30 +1100,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -1161,6 +1144,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
index d5e9a5b..80436bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
@@ -523,8 +523,14 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
+
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index 350395c..ad9ae33 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -673,12 +673,13 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -1044,6 +1045,7 @@
           <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
           <task xsi:type="configure" id="storm_worker_log4j_parameterize" />
           <task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+          <task xsi:type="configure" id="storm_worker_log4j_directory" />
         </pre-upgrade>
 
         <pre-downgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 9ac3d52..1eb9836 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -687,12 +687,13 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -1089,6 +1090,7 @@
           <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
           <task xsi:type="configure" id="storm_worker_log4j_parameterize" />
           <task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+          <task xsi:type="configure" id="storm_worker_log4j_directory" />
         </pre-upgrade>
 
         <pre-downgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
index bd0da9f..ebebf82 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/atlas-env.xml
@@ -136,7 +136,7 @@
 
       # Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.
       export ATLAS_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}
-      export ATLAS_SERVER_OPTS="-server -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+PrintTenuringDistribution -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$ATLAS_LOG_DIR/atlas_server.hprof -Xloggc:$ATLAS_LOG_DIRgc-worker.log -verbose:gc -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1m -XX:+PrintGCDetails -XX:+PrintHeapAtGC -XX:+PrintGCTimeStamps"
+      export ATLAS_SERVER_OPTS="-server -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+PrintTenuringDistribution -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$ATLAS_LOG_DIR/atlas_server.hprof -Xloggc:$ATLAS_LOG_DIR/gc-worker.log -verbose:gc -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1m -XX:+PrintGCDetails -XX:+PrintHeapAtGC -XX:+PrintGCTimeStamps"
       {% if java_version == 8 %}
       export ATLAS_SERVER_HEAP="-Xms{{atlas_server_xmx}}m -Xmx{{atlas_server_xmx}}m -XX:MaxNewSize={{atlas_server_max_new_size}}m -XX:MetaspaceSize=100m -XX:MaxMetaspaceSize=512m"
       {% else %}
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 2a1113a..3337e8e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1152,6 +1152,7 @@
                   "{2}".format(llap_mem_daemon_size, llap_mem_for_tezAm_and_daemons, tez_am_memory_required))
 
     llap_daemon_mem_per_node = self._normalizeDown(llap_mem_daemon_size / num_llap_nodes_requested, yarn_min_container_size)
+    # This value takes total cluster capacity into account, and may not leave enough capacity on each node to launch an AM.
     self.logger.info("DBG: Calculated 'llap_daemon_mem_per_node' : {0}, using following : llap_mem_daemon_size : {1}, num_llap_nodes_requested : {2}, "
                   "yarn_min_container_size: {3}".format(llap_daemon_mem_per_node, llap_mem_daemon_size, num_llap_nodes_requested, yarn_min_container_size))
     if llap_daemon_mem_per_node == 0:
@@ -1171,6 +1172,31 @@
       num_llap_nodes = num_llap_nodes_requested
       self.logger.info("DBG: num_llap_nodes : {0}".format(num_llap_nodes))
 
+    # Make sure we have enough memory on each node to run the AMs.
+    # If num_llap_nodes differs from num_llap_nodes_requested, AM memory is already factored in.
+    # If llap_node_count < total_cluster_nodes, assume the AMs can run on a different node.
+    # Otherwise reserve min_concurrency_per_node * tez_am_size plus slider_am_size,
+    # also factoring in whether num_llap_nodes equals the cluster node count.
+    min_mem_reserved_per_node = 0
+    if num_llap_nodes == num_llap_nodes_requested and num_llap_nodes == node_manager_cnt:
+      tez_AMs_per_node = float(llap_concurrency) / num_llap_nodes  # true division so the floor/ceil below are meaningful on Python 2
+      tez_AMs_per_node_low = int(math.floor(tez_AMs_per_node))
+      tez_AMs_per_node_high = int(math.ceil(tez_AMs_per_node))
+      min_mem_reserved_per_node = int(max(tez_AMs_per_node_high * normalized_tez_am_container_size, tez_AMs_per_node_low * normalized_tez_am_container_size + slider_am_container_size))
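+      # Illustrative sketch with assumed numbers (not taken from any real cluster): if
+      # llap_concurrency = 5, num_llap_nodes = 4, normalized_tez_am_container_size = 4096 and
+      # slider_am_container_size = 1024, then tez_AMs_per_node = 1.25, floor = 1, ceil = 2, and
+      # the reservation is max(2 * 4096, 1 * 4096 + 1024) = 8192 MB per node.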
+      self.logger.info("DBG: Determined 'AM reservation per node': {0}, using following : concurrency: {1}, num_llap_nodes: {2}, AMsPerNode: {3}"
+        .format(min_mem_reserved_per_node, llap_concurrency, num_llap_nodes,  tez_AMs_per_node))
+
+    max_single_node_mem_available_for_daemon = self._normalizeDown(yarn_nm_mem_in_mb_normalized - min_mem_reserved_per_node, yarn_min_container_size)
+    if max_single_node_mem_available_for_daemon <= 0 or max_single_node_mem_available_for_daemon < mem_per_thread_for_llap:
+      self.logger.warning("Not enough capacity available per node for daemons after factoring in AM memory requirements. NM Mem: {0}, "
+                          "minAMMemPerNode: {1}, available: {2}".format(yarn_nm_mem_in_mb_normalized, min_mem_reserved_per_node, max_single_node_mem_available_for_daemon))
+      self.recommendDefaultLlapConfiguration(configurations, services, hosts)
+      return
+
+    llap_daemon_mem_per_node = min(max_single_node_mem_available_for_daemon, llap_daemon_mem_per_node)
+    self.logger.info("DBG: Determined final memPerDaemon: {0}, using following: concurrency: {1}, numNMNodes: {2}, numLlapNodes: {3} "
+      .format(llap_daemon_mem_per_node, llap_concurrency, node_manager_cnt, num_llap_nodes))
+
     num_executors_per_node_max = self.get_max_executors_per_node(yarn_nm_mem_in_mb_normalized, cpu_per_nm_host, mem_per_thread_for_llap)
     if num_executors_per_node_max < 1:
       self.logger.warning("Calculated 'Max. Executors per Node' = {0}. Expected values >= 1.".format(num_executors_per_node_max))
@@ -1191,6 +1217,8 @@
     # Now figure out how much of the memory will be used by the executors, and how much will be used by the cache.
     total_mem_for_executors_per_node = num_executors_per_node * mem_per_thread_for_llap
     cache_mem_per_node = llap_daemon_mem_per_node - total_mem_for_executors_per_node
+    self.logger.info("DBG: Calculated 'Cache per node' : {0}, using following : llap_daemon_mem_per_node : {1}, total_mem_for_executors_per_node : {2}"
+                .format(cache_mem_per_node, llap_daemon_mem_per_node, total_mem_for_executors_per_node))
 
     tez_runtime_io_sort_mb = (long((0.8 * mem_per_thread_for_llap) / 3))
     tez_runtime_unordered_output_buffer_size = long(0.8 * 0.075 * mem_per_thread_for_llap)
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index b70b9df..a29f74b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -41,11 +41,6 @@
                  if-type="storm-site"
                  if-value="15000" />
           </definition>
-          <definition xsi:type="configure" id="storm_worker_log4j_update_path" summary="Updating Storm Worker Log4J Path">
-            <type>storm-worker-log4j</type>
-            <regex-replace key="content" find="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
-                           replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
-          </definition>
           <definition xsi:type="configure" id="storm_worker_log4j_parameterize" summary="Parameterizing Storm Worker Log4J Properties">
             <type>storm-worker-log4j</type>
             <set key="storm_wrkr_a1_maxfilesize" value="100"/>
@@ -54,14 +49,12 @@
             <set key="storm_wrkr_out_maxbackupindex" value="4"/>
             <set key="storm_wrkr_err_maxfilesize" value="100"/>
             <set key="storm_wrkr_err_maxbackupindex" value="4"/>
-            <regex-replace key="content" find="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
-                           replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
-            <regex-replace key="content" find="A1&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                                         replace-with="A1&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                                         replace-with="STDOUT&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
-            <regex-replace key="content" find="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;\$\{sys:workers.artifacts}/\$\{sys:storm.id}/\$\{sys:worker.port}/\$\{sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
-                                         replace-with="STDERR&quot;&#xA;&#009;&#009;fileName=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err&quot;&#xA;&#009;&#009;filePattern=&quot;${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                                         replace-with="}.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${pattern}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_a1_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_a1_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                                         replace-with="}.out.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_out_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_out_maxbackupindex}}"/>
+            <regex-replace key="content" find="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;\$\{patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;(?:[0-9]+) MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;([0-9]+)"
+                                         replace-with="}.err.%i.gz&quot;&gt;&#xA;        &lt;PatternLayout&gt;&#xA;            &lt;pattern&gt;${patternNoTime}&lt;/pattern&gt;&#xA;        &lt;/PatternLayout&gt;&#xA;        &lt;Policies&gt;&#xA;            &lt;SizeBasedTriggeringPolicy size=&quot;{{storm_wrkr_err_maxfilesize}} MB&quot;/&gt; &lt;!-- Or every 100 MB --&gt;&#xA;        &lt;/Policies&gt;&#xA;        &lt;DefaultRolloverStrategy max=&quot;{{storm_wrkr_err_maxbackupindex}}"/>
           </definition>
           <definition xsi:type="configure" id="storm_cluster_log4j_parameterize" summary="Parameterizing Storm Cluster Log4J Properties">
             <type>storm-cluster-log4j</type>
@@ -156,118 +149,128 @@
             <set key="atlas.kafka.zookeeper.session.timeout.ms" value="60000" if-type="application-properties" if-key="atlas.kafka.zookeeper.session.timeout.ms" if-key-state="present"/>
             <set key="atlas.audit.zookeeper.session.timeout.ms" value="60000" if-type="application-properties" if-key="atlas.audit.zookeeper.session.timeout.ms" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="atlas_env_gc_worker" summary="Updating Atlas Env gc-worker configuration">
+            <type>atlas-env</type>
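+            <!-- Restores the missing path separator so GC logs go to $ATLAS_LOG_DIR/gc-worker.log
+                 instead of a location derived from the undefined $ATLAS_LOG_DIRgc variable. -->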
+            <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
+          </definition>
         </changes>
       </component>
     </service>
-   <service name="OOZIE">
-    <component name="OOZIE_SERVER">
-      <changes>
-        <!-- Oozie Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
-          <type>oozie-log4j</type>
-          <set key="oozie_log_maxhistory" value="720"/>
-          <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
-  <service name="YARN">
-    <component name="RESOURCEMANAGER">
-      <changes>
-        <!-- Yarn Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
-          <type>yarn-log4j</type>
-          <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
-          <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
-          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
-          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
-        </definition>
-        <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
-          <type>yarn-env</type>
-          <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption">
-          <type>yarn-site</type>
-          <transfer operation="copy"
-                    from-key="yarn.resourcemanager.scheduler.monitor.enable"
-                    to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
-                    default-value="false"/>
-        </definition>
-        <definition xsi:type="configure" id="yarn_site_retained_log_count" summary="Updating Yarn retained file count for continuous Log Aggregation">
-          <type>yarn-site</type>
-          <set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
-               value="336" />
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
-          <type>yarn-env</type>
-          <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
-        </definition>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_ats_scan_interval_default">
-          <type>yarn-site</type>
-          <set key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" value="15"
-               if-type="yarn-site" if-key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" if-value="60"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
+     <service name="OOZIE">
+      <component name="OOZIE_SERVER">
+        <changes>
+          <!-- Oozie Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
+            <type>oozie-log4j</type>
+            <set key="oozie_log_maxhistory" value="720"/>
+            <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="YARN">
+      <component name="RESOURCEMANAGER">
+        <changes>
+          <!-- Yarn Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
+            <type>yarn-log4j</type>
+            <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
+            <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
+          </definition>
+          <definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
+            <type>yarn-env</type>
+            <insert key="content" value="{% if rm_security_opts is defined %} YARN_OPTS=&quot;{{rm_security_opts}} $YARN_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption">
+            <type>yarn-site</type>
+            <transfer operation="copy"
+                      from-key="yarn.resourcemanager.scheduler.monitor.enable"
+                      to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
+                      default-value="false"/>
+          </definition>
+          <definition xsi:type="configure" id="yarn_site_retained_log_count" summary="Updating Yarn retained file count for continuous Log Aggregation">
+            <type>yarn-site</type>
+            <set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
+                 value="336" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+            <type>yarn-env</type>
+            <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_ats_scan_interval_default">
+            <type>yarn-site</type>
+            <set key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" value="15"
+                 if-type="yarn-site" if-key="yarn.timeline-service.entity-group-fs-store.scan-interval-seconds" if-value="60"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
 
-  <service name="MAPREDUCE2">
-    <component name="MAPREDUCE2_CLIENT">
-      <changes>
-        <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
-          <type>mapred-site</type>
-          <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
-        </definition>
-      </changes>
-    </component>
-  </service>
+    <service name="MAPREDUCE2">
+      <component name="MAPREDUCE2_CLIENT">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+            <type>mapred-site</type>
+            <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
 
-  <service name="HDFS">
-    <component name="NAMENODE">
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <!-- HDFS Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
+            <type>hdfs-log4j</type>
+            <set key="hadoop_log_max_backup_size" value="256"/>
+            <set key="hadoop_log_number_of_backup_files" value="10"/>
+            <set key="hadoop_security_log_max_backup_size" value="256"/>
+            <set key="hadoop_security_log_number_of_backup_files" value="20"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
+          </definition>
+          <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
+            <type>hadoop-env</type>
+            <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdfs_securitylogger_additivity" summary="Set additivity of SecurityLogger to false">
+            <type>hdfs-log4j</type>
+            <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
+            <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
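+            <!-- -XX:CMSInitiatingOccupancyFraction=70 plus -XX:+UseCMSInitiatingOccupancyOnly makes CMS
+                 start collecting at a fixed 70% old-generation occupancy rather than relying on the
+                 JVM's adaptive heuristics, giving the NameNode more predictable GC behavior. -->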
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
       <changes>
-        <!-- HDFS Rolling properties for log4j need to be parameterized. -->
-        <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
-          <type>hdfs-log4j</type>
-          <set key="hadoop_log_max_backup_size" value="256"/>
-          <set key="hadoop_log_number_of_backup_files" value="10"/>
-          <set key="hadoop_security_log_max_backup_size" value="256"/>
-          <set key="hadoop_security_log_number_of_backup_files" value="20"/>
-          <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
-          <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
-          <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
-          <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
-        </definition>
-        <definition xsi:type="configure" id="hadoop_env_zkfc_security_opts" summary="Adding HDFS ZKFC Security ACLs">
-          <type>hadoop-env</type>
-          <insert key="content" value="{% if hadoop_zkfc_opts is defined %} export HADOOP_ZKFC_OPTS=&quot;{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS&quot; {% endif %}" insert-type="append" newline-before="true" newline-after="true" />
-        </definition>
-        <definition xsi:type="configure" id="hdfs_securitylogger_additivity" summary="Set additivity of SecurityLogger to false">
-          <type>hdfs-log4j</type>
-          <regex-replace  key="content" find="hadoop.security.log.file=SecurityAuth.audit" replace-with="hadoop.security.log.file=SecurityAuth.audit&#10;log4j.additivity.SecurityLogger=false"/>
-          <regex-replace  key="content" find="log4j.additivity.SecurityLogger=true" replace-with="log4j.additivity.SecurityLogger=false"/>
+      <!-- HBase Rolling properties for log4j need to be parameterized. -->
+        <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
+            <type>hbase-log4j</type>
+            <set key="hbase_log_maxfilesize" value="256"/>
+            <set key="hbase_log_maxbackupindex" value="20"/>
+            <set key="hbase_security_log_maxfilesize" value="256"/>
+            <set key="hbase_security_log_maxbackupindex" value="20"/>
+            <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
+            <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
         </definition>
       </changes>
-    </component>
-  </service>
-  <service name="HBASE">
-    <component name="HBASE_MASTER">
-    <changes>
-    <!-- HBase Rolling properties for log4j need to be parameterized. -->
-      <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
-          <type>hbase-log4j</type>
-          <set key="hbase_log_maxfilesize" value="256"/>
-          <set key="hbase_log_maxbackupindex" value="20"/>
-          <set key="hbase_security_log_maxfilesize" value="256"/>
-          <set key="hbase_security_log_maxbackupindex" value="20"/>
-          <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
-          <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
-          <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
-          <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
-      </definition>
-    </changes>
-    </component>
-  </service>
-  <service name="FALCON">
+      </component>
+    </service>
+    <service name="FALCON">
       <component name="FALCON_SERVER">
         <changes>
           <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
@@ -371,24 +374,24 @@
     </component>
     </service>
     <service name="KNOX">
-    <component name="KNOX_GATEWAY">
-    <changes>
-      <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
-        <type>gateway-log4j</type>
-        <set key="knox_gateway_log_maxfilesize" value="256"/>
-        <set key="knox_gateway_log_maxbackupindex" value="20"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
-        </definition>
-      <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
-        <type>ldap-log4j</type>
-        <set key="knox_ldap_log_maxfilesize" value="256"/>
-        <set key="knox_ldap_log_maxbackupindex" value="20"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
-        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
-      </definition>
-    </changes>
-    </component>
+      <component name="KNOX_GATEWAY">
+        <changes>
+          <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
+            <type>gateway-log4j</type>
+            <set key="knox_gateway_log_maxfilesize" value="256"/>
+            <set key="knox_gateway_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
+            <type>ldap-log4j</type>
+            <set key="knox_ldap_log_maxfilesize" value="256"/>
+            <set key="knox_ldap_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="PIG">
@@ -477,6 +480,25 @@
             <set key="hive.vectorized.groupby.maxentries" value="1000000"/>
           </definition>
 
+          <definition xsi:type="configure" id="llap_update_settings" summary="Update additional LLAP settings">
+            <type>hive-interactive-site</type>
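+            <!-- Note: a value of 0 for hive.llap.daemon.rpc.port lets each daemon bind an
+                 OS-assigned ephemeral port, avoiding conflicts when ports are managed dynamically. -->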
+            <set key="hive.llap.task.scheduler.locality.delay" value="-1"/>
+            <set key="hive.mapjoin.hybridgrace.hashtable" value="false"/>
+            <set key="hive.merge.nway.joins" value="false"/>
+            <set key="hive.llap.daemon.rpc.port" value="0"/>
+          </definition>
+
+          <definition xsi:type="configure" id="llap_update_tez_settings" summary="Update additional LLAP-Tez settings">
+            <type>tez-interactive-site</type>
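+            <!-- 1209600 seconds is 14 days; together with shuffle keep-alive this keeps long-lived
+                 interactive AM sessions from timing out between queries. -->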
+            <set key="tez.runtime.shuffle.keep-alive.enabled" value="true"/>
+            <set key="tez.am.am-rm.heartbeat.interval-ms.max" value="10000"/>
+            <set key="tez.session.am.dag.submit.timeout.secs" value="1209600"/>
+            <set key="tez.runtime.enable.final-merge.in.output" value="false"/>
+            <set key="tez.am.task.reschedule.higher.priority" value="false"/>
+            <set key="tez.runtime.shuffle.connect.timeout" value ="30000"/>
+            <set key="tez.runtime.shuffle.read.timeout" value="30000"/>
+          </definition>
+
           <definition xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive">
             <type>hive-interactive-site</type>
             <transfer operation="copy" from-type="hive-site" from-key="hive.tez.container.size" to-key="hive.tez.container.size" default-value="682"  if-type="hive-interactive-site" if-key="hive.tez.container.size" if-key-state="absent"/>
@@ -487,6 +509,16 @@
             <transfer operation="delete" delete-key="atlas.cluster.name"/>
           </definition>
 
+          <definition xsi:type="configure" id="llap_append_stack_size_java_opts" summary="Update JVM stack size for LLAP">
+            <type>hive-interactive-env</type>
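+            <!-- Appends -Xss512k so each LLAP thread uses a 512 KB stack (smaller than the typical
+                 1 MB 64-bit default), trimming native memory use across the daemon's many threads. -->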
+            <insert key="llap_java_opts" value="-Xss512k" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+
+          <definition xsi:type="configure" id="llap_update_shuffle_parallel_copies" summary="Update tez shuffle parallel copies for LLAP">
+            <type>hive-interactive-site</type>
+            <set key="tez.runtime.shuffle.parallel.copies" value="8"/>
+          </definition>
+
         </changes>
 
       </component>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-upgrade-2.5.xml
index c180b61..a16ef69 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-upgrade-2.5.xml
@@ -179,30 +179,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task" />
-        </upgrade>
-      </component>
-    </service>
-
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
@@ -248,7 +224,31 @@
         </upgrade>
       </component>
     </service>
-    
+
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+    </service>
+
     <service name="SLIDER">
       <component name="SLIDER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
index 0f05089..c8cb0b3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
@@ -271,6 +271,11 @@
       <direction>UPGRADE</direction>   <!--  prevent config changes on downgrade -->
       <skippable>true</skippable>  <!-- May fix configuration problems manually -->
 
+      <!-- HDFS -->
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -307,23 +312,23 @@
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
 
-      <execute-stage service="STORM" component="NIMBUS" title="Updating Storm Worker Log4J Path">
-        <task xsi:type="configure" id="storm_worker_log4j_update_path">
-          <summary>Updating the Storm worker Log4J path</summary>
-        </task>
-      </execute-stage>
-
       <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas TLS Exclude Protocols">
         <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol">
           <summary>Updating Atlas TLS Exclude Protocols to exclude TLS v1.2</summary>
         </task>
       </execute-stage>
 
-      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas zookeeper timeout valuesProtocols">
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas zookeeper timeout values">
         <task xsi:type="configure" id="increase_atlas_zookeeper_timeouts">
           <summary>Updating Atlas zookeeper timeout values</summary>
         </task>
       </execute-stage>
+
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Env gc-worker configuration">
+        <task xsi:type="configure" id="atlas_env_gc_worker">
+          <summary>Updating Atlas Env gc-worker configuration</summary>
+        </task>
+      </execute-stage>
     </group>
 
     <!--
@@ -378,16 +383,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -426,6 +421,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -826,30 +831,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -894,6 +875,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index a6bc4fc..8c659ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -281,6 +281,10 @@
         <task xsi:type="configure" id="hdfs_securitylogger_additivity"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -465,6 +469,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Env gc-worker configuration">
+        <task xsi:type="configure" id="atlas_env_gc_worker">
+          <summary>Updating Atlas Env gc-worker configuration</summary>
+        </task>
+      </execute-stage>
+
       <!--KAFKA-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
         <task xsi:type="configure" id="kafka_log4j_parameterize">
@@ -540,6 +550,42 @@
         <task xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options_for_tez_am"/>
       </execute-stage>
 
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update headroom for LLAP">
+        <task xsi:type="configure" id="llap_update_headroom">
+          <summary>Updating headroom for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update Hash Aggregation settings for LLAP">
+        <task xsi:type="configure" id="llap_update_hashaggregation">
+          <summary>Updating Hash Aggregation settings for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update additional LLAP settings">
+        <task xsi:type="configure" id="llap_update_settings">
+          <summary>Update additional LLAP settings</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update additional LLAP-Tez settings">
+        <task xsi:type="configure" id="llap_update_tez_settings">
+          <summary>Update additional LLAP-Tez settings</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update JVM stack size for LLAP">
+        <task xsi:type="configure" id="llap_append_stack_size_java_opts">
+          <summary>Updating JVM stack size for LLAP</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update tez shuffle parallel copies for LLAP">
+        <task xsi:type="configure" id="llap_update_shuffle_parallel_copies">
+          <summary>Updating tez shuffle parallel copies for LLAP</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Copying hive.tez.container.size to HIVE SERVER INTERACTIVE's hive-site">
         <task xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive"/>
       </execute-stage>
@@ -650,16 +696,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -696,19 +732,16 @@
           <function>wait_for_safemode_off</function>
         </task>
       </execute-stage>
+    </group>
 
-      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update headroom for LLAP">
-        <task xsi:type="configure" id="llap_update_headroom">
-          <summary>Updating the Hive Log4J2 properties to include parameterizations</summary>
-        </task>
-      </execute-stage>
-
-      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Update Hash Aggregation settings for LLAP">
-        <task xsi:type="configure" id="llap_update_hashaggregation">
-          <summary>Updating Hash Aggregation settings for LLAP</summary>
-        </task>
-      </execute-stage>
-
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
     </group>
 
     <group xsi:type="restart" name="KAFKA" title="Kafka">
@@ -1142,30 +1175,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -1210,6 +1219,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
index 04a06e8..f3b1fa6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
@@ -573,8 +573,14 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+        </pre-upgrade>
+
+        <pre-downgrade/>
+
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -881,7 +887,6 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"/>
           <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
-          <task xsi:type="configure" id="storm_worker_log4j_update_path" />
         </pre-upgrade>
         
         <pre-downgrade />
@@ -953,6 +958,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol"/>
           <task xsi:type="configure" id="increase_atlas_zookeeper_timeouts"/>
+          <task xsi:type="configure" id="atlas_env_gc_worker"/>
         </pre-upgrade>
         
         <pre-downgrade/>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 879fe0f..3054ca3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -616,10 +616,11 @@
           <task xsi:type="configure" id="hdfs_log4j_parameterize" />
           <task xsi:type="configure" id="hadoop_env_zkfc_security_opts" />
           <task xsi:type="configure" id="hdfs_securitylogger_additivity" />
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -842,6 +843,10 @@
           <task xsi:type="configure" id="llap_cli_log4j2_parameterize"/>
           <task xsi:type="configure" id="llap_update_headroom"/>
           <task xsi:type="configure" id="llap_update_hashaggregation"/>
+          <task xsi:type="configure" id="llap_append_stack_size_java_opts"/>
+          <task xsi:type="configure" id="llap_update_shuffle_parallel_copies"/>
+          <task xsi:type="configure" id="llap_update_settings"/>
+          <task xsi:type="configure" id="llap_update_tez_settings"/>
         </pre-upgrade>
         
         <pre-downgrade />
@@ -1094,6 +1099,7 @@
           <task xsi:type="configure" id="atlas_log4j_parameterize" />
           <task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol"/>
           <task xsi:type="configure" id="increase_atlas_zookeeper_timeouts"/>
+          <task xsi:type="configure" id="atlas_env_gc_worker"/>
         </pre-upgrade>
         <pre-downgrade />
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
new file mode 100644
index 0000000..d54cf15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/atlas-env.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>atlas.admin.username</name>
+    <display-name>Admin username</display-name>
+    <description>Admin login username</description>
+    <value>admin</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.admin.password</name>
+    <display-name>Admin password</display-name>
+    <description>Admin login password</description>
+    <value>admin</value>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
index 4aadb83..84ea231 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
@@ -86,6 +86,8 @@
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
index 0610833..cb4f947 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -30,7 +30,7 @@
   -->
   <property>
     <name>hive.llap.task.scheduler.locality.delay</name>
-    <value>8000</value>
+    <value>-1</value>
     <description>
       Amount of time to wait (in ms) before allocating a request which contains location information,
       to a location other than the ones requested. Set to -1 for an infinite delay, 0
@@ -41,7 +41,7 @@
 
   <property>
     <name>hive.mapjoin.hybridgrace.hashtable</name>
-    <value>true</value>
+    <value>false</value>
     <description>Whether to use hybrid grace hash join as the join method for mapjoin.
       Applies to dynamically partitioned joins when running in LLAP, but not to regular
       broadcast(map) joins. hive.llap.enable.grace.join.in.llap is used for this.
@@ -103,4 +103,11 @@
     <on-ambari-upgrade add="false"/>
   </property>
 
+  <property>
+    <name>hive.merge.nway.joins</name>
+    <value>false</value>
+    <description>Merge adjacent joins into a single n-way join</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
index ab1202a..2c9b272 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/tez-interactive-site.xml
@@ -102,4 +102,41 @@
     </description>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>tez.runtime.shuffle.keep-alive.enabled</name>
+    <value>true</value>
+    <description>Connection keep-alive for shuffle</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
+    <value>10000</value>
+    <description>The maximum heartbeat interval (in ms) between the Tez AM and the YARN ResourceManager</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.runtime.enable.final-merge.in.output</name>
+    <value>false</value>
+    <description>Whether to enable a map side merge of outputs</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.am.task.reschedule.higher.priority</name>
+    <value>false</value>
+    <description>Whether rescheduled tasks should be treated at higher priority</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.runtime.shuffle.connect.timeout</name>
+    <value>30000</value>
+    <description>Shuffle connect timeout (ms)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>tez.runtime.shuffle.read.timeout</name>
+    <value>30000</value>
+    <description>Shuffle read timeout (ms)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
index db5c8b8..49e00f7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/STORM/metainfo.xml
@@ -22,6 +22,10 @@
     <service>
       <name>STORM</name>
       <version>1.1.0</version>
+      <extends>common-services/STORM/1.1.0</extends>
+      <configuration-dependencies>
+        <config-type>application-properties</config-type>
+      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
index d04c3c5..1b2ca68 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-env.xml
@@ -25,6 +25,24 @@
     <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Maximum heap size for the Application Timeline Server, specified as an integer number of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.timeline-service.entity-group-fs-store.app-cache-size</name>
+      </property>
+    </depends-on>
+  </property>
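+  <!-- Recalculated by the stack advisor from host memory and yarn.timeline-service.entity-group-fs-store.app-cache-size. -->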
   <!-- yarn-env.sh -->
   <property>
     <name>content</name>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
index acdae65..cab0e65 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/configuration/yarn-site.xml
@@ -42,6 +42,18 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+    <description>Controls the pace at which containers marked for preemption are actually preempted in each period.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>1</value>
+    <description>Similar to total_preemption_per_round, this factor can be applied to slow down resource preemption after the preemption target is computed for each queue.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>yarn.timeline-service.entity-group-fs-store.app-cache-size</name>
     <value>10</value>
     <description>
@@ -89,4 +101,14 @@
     </depends-on>
     <on-ambari-upgrade add="false"/>
   </property>
+  <property>
+    <name>yarn.nodemanager.kill-escape.launch-command-line</name>
+    <value>slider-agent,LLAP</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.kill-escape.user</name>
+    <value>hive</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
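+  <!-- Lets LLAP processes launched via slider-agent as the hive user escape NodeManager container kills. -->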
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 7881917..488562b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -16,6 +16,8 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 """
+import math
+
 import json
 import re
 from resource_management.libraries.functions import format
@@ -38,11 +40,42 @@
         "HIVE": self.recommendHIVEConfigurations,
         "HBASE": self.recommendHBASEConfigurations,
         "YARN": self.recommendYARNConfigurations,
-        "KAFKA": self.recommendKAFKAConfigurations
+        "KAFKA": self.recommendKAFKAConfigurations,
+        "BEACON": self.recommendBEACONConfigurations
       }
       parentRecommendConfDict.update(childRecommendConfDict)
       return parentRecommendConfDict
 
+  def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
+    beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
+    putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
+
+    # database URL and driver class recommendations
+    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+      putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
+    if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+      beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
+      beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
+      protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
+      oldSchemaName = self.getOldValue(services, "beacon-env", "beacon_store_db_name")
+      oldDBType = self.getOldValue(services, "beacon-env", "beacon_database")
+      # The checks below fire when the Beacon server hostname is available and either:
+      # the connection URL still points at the default "localhost", the schema name changed,
+      # the db type changed (currently only default MySQL to existing MySQL), or the protocol
+      # implied by the current db type no longer matches the connection URL (other db type changes).
+      if beaconServerHost is not None:
+        if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
+          dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
+          putbeaconEnvProperty('beacon_store_url', dbConnection)
+
+  def getDBConnectionStringBeacon(self, databaseType):
+    driverDict = {
+      'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
+      'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
+    }
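+    # e.g. ('EXISTING MYSQL DATABASE', 'db.example.com', 'beacon') -> 'jdbc:mysql://db.example.com/beacon' (illustrative host/schema values)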
+    return driverDict.get(databaseType.upper())
+
   def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
@@ -146,6 +179,7 @@
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
 
     if "yarn-site" in services["configurations"] and \
                     "yarn.resourcemanager.scheduler.monitor.enable" in services["configurations"]["yarn-site"]["properties"]:
@@ -155,6 +189,11 @@
       else:
         putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")
 
+    # calculate total_preemption_per_round
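+    # Preempt at most one host's share of resources per round, floored at 10%:
+    # e.g. 4 hosts -> max(0.25, 0.1) = 0.25; 20 hosts -> max(0.05, 0.1) = 0.1.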
+    total_preemption_per_round = str(round(max(1.0 / len(hosts['items']), 0.1), 2))
+    putYarnSiteProperty('yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round', total_preemption_per_round)
+
     if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
       yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
     else:
@@ -186,6 +225,83 @@
     else:
       self.logger.info("Not setting Yarn Repo user for Ranger.")
 
+
+    yarn_timeline_app_cache_size = None
+    host_mem = None
+    for host in hosts["items"]:
+      host_mem = host["Hosts"]["total_mem"]
+      break
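+    # host_mem: total memory of the first host, used for the ATS sizing below.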
+    # Check if 'yarn.timeline-service.entity-group-fs-store.app-cache-size' in changed configs.
+    changed_configs_has_ats_cache_size = self.isConfigPropertiesChanged(
+      services, "yarn-site", ['yarn.timeline-service.entity-group-fs-store.app-cache-size'], False)
+    # Pick up a cache size when either: 1. 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+    # appears in changed-configurations (read the user-supplied value), OR 2. the cluster is
+    # initializing (services['changed-configurations'] is empty; recommend a value from host memory).
+    if changed_configs_has_ats_cache_size:
+      yarn_timeline_app_cache_size = self.read_yarn_apptimelineserver_cache_size(services)
+    elif 0 == len(services['changed-configurations']):
+      # Cluster initialization: no configs changed yet, so recommend from the first host's memory.
+      if host_mem is not None:
+        yarn_timeline_app_cache_size = self.calculate_yarn_apptimelineserver_cache_size(host_mem)
+        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.app-cache-size', yarn_timeline_app_cache_size)
+        self.logger.info("Updated YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as : {0}, "
+                    "using 'host_mem' = {1}".format(yarn_timeline_app_cache_size, host_mem))
+      else:
+        self.logger.info("Couldn't update YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as "
+                    "'host_mem' read = {0}".format(host_mem))
+
+    if yarn_timeline_app_cache_size is not None:
+      # Calculation for 'ats_heapsize' is in MB.
+      ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size)
+      putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB
+      self.logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize))
+
+  """
+  Calculate YARN config 'apptimelineserver_heapsize' in MB.
+  """
+  def calculate_yarn_apptimelineserver_heapsize(self, host_mem, yarn_timeline_app_cache_size):
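+    # Heuristic: hosts reporting total_mem below 4096 get a fixed 1024 MB heap; otherwise
+    # half of total_mem, capped at yarn_timeline_app_cache_size * 500 + 3072.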
+    ats_heapsize = None
+    if host_mem < 4096:
+      ats_heapsize = 1024
+    else:
+      ats_heapsize = long(min(math.floor(host_mem/2), long(yarn_timeline_app_cache_size) * 500 + 3072))
+    return ats_heapsize
+
+  """
+  Calculates for YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size', based on YARN's NodeManager size.
+  """
+  def calculate_yarn_apptimelineserver_cache_size(self, host_mem):
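+    # Tiered by total_mem: below 4096 -> 3, 4096-8191 -> 7, 8192 and above -> 10.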
+    yarn_timeline_app_cache_size = None
+    if host_mem < 4096:
+      yarn_timeline_app_cache_size = 3
+    elif host_mem >= 4096 and host_mem < 8192:
+      yarn_timeline_app_cache_size = 7
+    elif host_mem >= 8192:
+      yarn_timeline_app_cache_size = 10
+    self.logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size))
+    return yarn_timeline_app_cache_size
+
+  def read_yarn_apptimelineserver_cache_size(self, services):
+    """
+    Read YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size'.
+    :type services dict
+    :rtype str
+    """
+    yarn_ats_app_cache_size = None
+    yarn_ats_app_cache_size_config = "yarn.timeline-service.entity-group-fs-store.app-cache-size"
+    yarn_site_in_services = self.getServicesSiteProperties(services, "yarn-site")
+
+    if yarn_site_in_services and yarn_ats_app_cache_size_config in yarn_site_in_services:
+      yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config]
+      self.logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size))
+
+    if not yarn_ats_app_cache_size:
+      self.logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config))
+
+    return yarn_ats_app_cache_size
+
   def getMetadataConnectionString(self, database_type):
       driverDict = {
           'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
@@ -349,8 +465,10 @@
       and ranger_usersync_properties['ranger.usersync.ldap.deltasync'].lower() == 'true'
     group_sync_enabled = 'ranger.usersync.group.searchenabled' in ranger_usersync_properties \
       and ranger_usersync_properties['ranger.usersync.group.searchenabled'].lower() == 'true'
+    usersync_source_ldap_enabled = 'ranger.usersync.source.impl.class' in ranger_usersync_properties \
+      and ranger_usersync_properties['ranger.usersync.source.impl.class'] == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder'
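+    # Only flag the deltasync/group-search mismatch when LDAP is the active usersync source.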
 
-    if delta_sync_enabled and not group_sync_enabled:
+    if usersync_source_ldap_enabled and delta_sync_enabled and not group_sync_enabled:
       validationItems.append({"config-name": "ranger.usersync.group.searchenabled",
                             "item": self.getWarnItem(
                             "Need to set ranger.usersync.group.searchenabled as true, as ranger.usersync.ldap.deltasync is enabled")})
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index ceed59b..1610bb5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -38,6 +38,12 @@
             <set key="ranger.plugin.hdfs.ambari.cluster.name" value="{{cluster_name}}"
               if-type="ranger-hdfs-plugin-properties" if-key="ranger-hdfs-plugin-enabled" if-key-state="present"/>
           </definition>
+
+          <definition xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics" summary="Prevent Garbage Collection Heuristics">
+            <type>hadoop-env</type>
+            <replace  key="content" find="-XX:+PrintGCDateStamps -Xms{{namenode_heapsize}}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}}"/>
+            <replace  key="content" find="-XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE}" replace-with="-XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -98,6 +104,26 @@
             <set key="ranger.plugin.yarn.ambari.cluster.name" value="{{cluster_name}}"
               if-type="ranger-yarn-plugin-properties" if-key="ranger-yarn-plugin-enabled" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_yarn_preemption" summary="Resource Manager Preemption Settings">
+            <type>yarn-site</type>
+            <set key="yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round" value="0.1"
+              if-type="yarn-site"
+              if-key="yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round"
+              if-key-state="absent" />
+            <set key="yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor" value="1"
+              if-type="yarn-site"
+              if-key="yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor"
+              if-key-state="absent" />
+          </definition>
+        </changes>
+      </component>
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
+            <type>yarn-site</type>
+            <set key="yarn.nodemanager.kill-escape.launch-command-line" value="slider-agent,LLAP"/>
+            <set key="yarn.nodemanager.kill-escape.user" value="hive"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -122,6 +148,10 @@
             <set key="ranger.plugin.atlas.ambari.cluster.name" value="{{cluster_name}}"
               if-type="ranger-atlas-plugin-properties" if-key="ranger-atlas-plugin-enabled" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="atlas_env_gc_worker" summary="Updating Atlas Env gc-worker configuration">
+            <type>atlas-env</type>
+            <replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
+          </definition>
         </changes>
       </component>
     </service>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-upgrade-2.6.xml
index f923702..a5943c1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-upgrade-2.6.xml
@@ -174,30 +174,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -242,6 +218,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ce07f7a..1cdd184 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -294,6 +294,10 @@
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hdfs_plugin_cluster_name"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Prevent Garbage Collection Heuristics">
+        <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Ranger Hive plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_cluster_name"/>
@@ -318,6 +322,17 @@
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Ranger Yarn plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
       </execute-stage>
+
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for YARN Preemption">
+        <task xsi:type="configure" id="hdp_2_6_yarn_preemption"/>
+      </execute-stage>
+
+      <!-- YARN -->
+      <execute-stage service="YARN" component="NODEMANAGER" title="Apply config changes for YARN NM">
+        <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem">
+          <summary>Updating YARN NodeManager config for LLAP</summary>
+        </task>
+      </execute-stage>
 
       <!-- KAFKA -->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Ranger Kafka plugin">
@@ -329,6 +344,10 @@
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_atlas_plugin_cluster_name"/>
       </execute-stage>
 
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Env gc-worker configuration">
+        <task xsi:type="configure" id="atlas_env_gc_worker"/>
+      </execute-stage>
+
       <!-- KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger Kms plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_kms_plugin_cluster_name"/>
@@ -389,16 +408,6 @@
       </service>
     </group>
 
-    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <parallel-scheduler/>
-      <service name="RANGER_KMS">
-        <component>RANGER_KMS_SERVER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -437,6 +446,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -873,30 +892,6 @@
       </component>
     </service>
 
-    <service name="RANGER_KMS">
-      <component name="RANGER_KMS_SERVER">
-        <pre-upgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Upgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-upgrade>
-
-        <pre-downgrade>
-          <task xsi:type="execute" hosts="any" sequential="true">
-            <summary>Downgrading Ranger KMS database schema</summary>
-            <script>scripts/kms_server.py</script>
-            <function>setup_ranger_kms_database</function>
-          </task>
-        </pre-downgrade>
-
-        <upgrade>
-          <task xsi:type="restart-task"/>
-        </upgrade>
-      </component>
-    </service>
-
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -941,6 +936,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index fd72e4d..3e7e3d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -607,10 +607,11 @@
       <component name="NAMENODE">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hdfs_plugin_cluster_name"/>
+          <task xsi:type="configure" id="hdfs_namenode_prevent_gc_heuristics"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.nn-restart.timeout"/>
         </upgrade>
       </component>
 
@@ -685,6 +686,7 @@
       <component name="RESOURCEMANAGER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
+          <task xsi:type="configure" id="hdp_2_6_yarn_preemption" />
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>
@@ -693,6 +695,10 @@
       </component>
 
       <component name="NODEMANAGER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_6_0_0_yarn_nodemanager_llap_mem"/>
+        </pre-upgrade>
+        <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -1017,6 +1023,7 @@
       <component name="ATLAS_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_atlas_plugin_cluster_name"/>
+          <task xsi:type="configure" id="atlas_env_gc_worker"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
index 320872e..5d79084 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -109,8 +109,8 @@
   if len(parts) == 1:
     parts.append("")
 
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
 
   if users_list:
     User(users_list,
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index 5a5361c..a3830f7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -105,7 +105,15 @@
 namenode_host = default("/clusterHostInfo/namenode_host", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+cluster_name = config["clusterName"]
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
 has_namenode = not len(namenode_host) == 0
 has_resourcemanager = not len(rm_host) == 0
@@ -131,8 +139,8 @@
 metric_collector_port = None
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
     metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
@@ -150,6 +158,8 @@
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
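+# Consumed by hadoop-metrics2.properties.j2 for host-level in-memory metric aggregation.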
 
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 2f3aab6..2cd9aa8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -75,6 +75,8 @@
 *.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
 *.sink.timeline.protocol={{metric_collector_protocol}}
 *.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ATLAS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ATLAS/metainfo.xml
new file mode 100644
index 0000000..e11b3ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ATLAS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>ATLAS</name>
+            <version>0.7.0.3.0</version>
+            <extends>common-services/ATLAS/0.7.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..04adb75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/2.0.0.3.0</extends>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
index fbda35a..9d504db 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -165,6 +165,6 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KAFKA/metainfo.xml
new file mode 100644
index 0000000..d0326c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KAFKA/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KAFKA</name>
+            <version>0.10.0.3.0</version>
+            <extends>common-services/KAFKA/0.10.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml
new file mode 100644
index 0000000..d8054b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KNOX</name>
+            <version>0.5.0.3.0</version>
+            <extends>common-services/KNOX/0.5.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..e1c73f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/OOZIE/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>OOZIE</name>
+            <version>4.2.0.3.0</version>
+            <extends>common-services/OOZIE/4.2.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..c8b3d65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <version>0.7.0.3.0</version>
+            <extends>common-services/RANGER/0.7.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER_KMS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER_KMS/metainfo.xml
new file mode 100644
index 0000000..3375d90
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/RANGER_KMS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER_KMS</name>
+            <version>0.5.0.3.0</version>
+            <extends>common-services/RANGER_KMS/0.5.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..757bce5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>SQOOP</name>
+            <version>1.4.4.3.0</version>
+            <extends>common-services/SQOOP/1.4.4.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml
new file mode 100644
index 0000000..1833c6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/STORM/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>STORM</name>
+            <version>1.0.1.3.0</version>
+            <extends>common-services/STORM/1.0.1.3.0</extends>
+        </service>
+    </services>
+</metainfo>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
index deb4ef7..a143660 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -44,6 +44,6 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
index cbeb08b..6ce4d72 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -25,7 +25,7 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.2 -->
@@ -37,13 +37,13 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.application.framework.path</name>
     <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -61,18 +61,18 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-logsearch-conf.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-logsearch-conf.xml
deleted file mode 100644
index 891445d..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/configuration/hbase-logsearch-conf.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>HBase</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>FAKEHBASE_MASTER:hbase_master;FAKEHBASE_REGIONSERVER:hbase_regionserver;FAKEPHOENIX_QUERY_SERVER:hbase_phoenix_server</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hbase_master",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
-    },
-    {
-      "type":"hbase_regionserver",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
-    },
-    {
-      "type":"hbase_phoenix_server",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hbase_master",
-            "hbase_regionserver"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hbase_phoenix_server"
-          ]
-         }
-      },
-      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/templates/input.config-hbase.json.j2 b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/templates/input.config-hbase.json.j2
new file mode 100644
index 0000000..94fbc64
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHBASE/package/templates/input.config-hbase.json.j2
@@ -0,0 +1,79 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hbase_master",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"
+    },
+    {
+      "type":"hbase_regionserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-regionserver-*.log"
+    },
+    {
+      "type":"hbase_phoenix_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/phoenix-*-server.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_master",
+            "hbase_regionserver"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hbase_phoenix_server"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p [%t] %c{2}: %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
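This template (and the FAKEHDFS one below) carries the same Logfeeder metadata that the deleted *-logsearch-conf.xml properties used to embed, but as a standalone Jinja-templated JSON file. One way to sanity-check such a template is to render the {{default(...)}} calls with a stub and parse the result; the stub below is illustrative, not Ambari's actual lookup:

import json
from jinja2 import Template

def default(path, fallback):
    # Stand-in for Ambari's config lookup; always take the fallback here.
    return fallback

tmpl = Template("""{"input": [{"type": "hbase_master", "rowtype": "service",
  "path": "{{default('/configurations/hbase-env/hbase_log_dir', '/var/log/hbase')}}/hbase-*-master-*.log"}]}""")
rendered = tmpl.render(default=default)
print(json.loads(rendered)['input'][0]['path'])  # /var/log/hbase/hbase-*-master-*.log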
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-logsearch-conf.xml
deleted file mode 100644
index 96abb55..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/configuration/hdfs-logsearch-conf.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>FAKEHDFS</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>FAKENAMENODE:hdfs_namenode;FAKEDATANODE:hdfs_datanode;SECONDARY_FAKENAMENODE:hdfs_secondarynamenode;FAKEJOURNALNODE:hdfs_journalnode;FAKEZKFC:hdfs_zkfc;FAKENFS_GATEWAY:hdfs_nfs3</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hdfs_datanode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
-    },
-    {
-      "type":"hdfs_namenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
-    },
-    {
-      "type":"hdfs_journalnode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
-    },
-    {
-      "type":"hdfs_secondarynamenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
-    },
-    {
-      "type":"hdfs_zkfc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
-    },
-    {
-      "type":"hdfs_nfs3",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
-    },
-    {
-      "type":"hdfs_audit",
-      "rowtype":"audit",
-      "is_enabled":"true",
-      "add_fields":{
-        "logType":"FAKEHDFSAudit",
-        "enforcer":"hadoop-acl",
-        "repoType":"1",
-        "repo":"hdfs"
-      },
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_datanode",
-            "hdfs_journalnode",
-            "hdfs_secondarynamenode",
-            "hdfs_namenode",
-            "hdfs_zkfc",
-            "hdfs_nfs3"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "evtTime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"keyvalue",
-      "sort_order":1,
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "source_field":"log_message",
-      "value_split":"=",
-      "field_split":"\t",
-      "post_map_values":{
-        "src":{
-          "map_fieldname":{
-            "new_fieldname":"resource"
-          }
-         },
-        "ip":{
-          "map_fieldname":{
-            "new_fieldname":"cliIP"
-          }
-         },
-        "allowed":[
-          {
-            "map_fieldvalue":{
-              "pre_value":"true",
-              "post_value":"1"
-            }
-           },
-          {
-            "map_fieldvalue":{
-              "pre_value":"false",
-              "post_value":"0"
-            }
-           },
-          {
-            "map_fieldname":{
-              "new_fieldname":"result"
-            }
-           }
-         ],
-        "cmd":{
-          "map_fieldname":{
-            "new_fieldname":"action"
-          }
-         },
-        "proto":{
-          "map_fieldname":{
-            "new_fieldname":"cliType"
-          }
-         },
-        "callerContext":{
-          "map_fieldname":{
-            "new_fieldname":"req_caller_id"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "sort_order":2,
-      "source_field":"ugi",
-      "remove_source_field":"false",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
-      "post_map_values":{
-        "user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "x_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "p_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "k_user":{
-          "map_fieldname":{
-            "new_fieldname":"proxyUsers"
-          }
-         },
-        "p_authType":{
-          "map_fieldname":{
-            "new_fieldname":"authType"
-          }
-         },
-        "k_authType":{
-          "map_fieldname":{
-            "new_fieldname":"proxyAuthType"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2 b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2
new file mode 100644
index 0000000..af89b90
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2
@@ -0,0 +1,216 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"hdfs_datanode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
+    },
+    {
+      "type":"hdfs_namenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
+    },
+    {
+      "type":"hdfs_journalnode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
+    },
+    {
+      "type":"hdfs_secondarynamenode",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
+    },
+    {
+      "type":"hdfs_zkfc",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
+    },
+    {
+      "type":"hdfs_nfs3",
+      "rowtype":"service",
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
+    },
+    {
+      "type":"hdfs_audit",
+      "rowtype":"audit",
+      "is_enabled":"true",
+      "add_fields":{
+        "logType":"FAKEHDFSAudit",
+        "enforcer":"hadoop-acl",
+        "repoType":"1",
+        "repo":"hdfs"
+      },
+      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_datanode",
+            "hdfs_journalnode",
+            "hdfs_secondarynamenode",
+            "hdfs_namenode",
+            "hdfs_zkfc",
+            "hdfs_nfs3"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "evtTime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    },
+    {
+      "filter":"keyvalue",
+      "sort_order":1,
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "source_field":"log_message",
+      "value_split":"=",
+      "field_split":"\t",
+      "post_map_values":{
+        "src":{
+          "map_fieldname":{
+            "new_fieldname":"resource"
+          }
+        },
+        "ip":{
+          "map_fieldname":{
+            "new_fieldname":"cliIP"
+          }
+        },
+        "allowed":[
+          {
+            "map_fieldvalue":{
+              "pre_value":"true",
+              "post_value":"1"
+            }
+          },
+          {
+            "map_fieldvalue":{
+              "pre_value":"false",
+              "post_value":"0"
+            }
+          },
+          {
+            "map_fieldname":{
+              "new_fieldname":"result"
+            }
+          }
+        ],
+        "cmd":{
+          "map_fieldname":{
+            "new_fieldname":"action"
+          }
+        },
+        "proto":{
+          "map_fieldname":{
+            "new_fieldname":"cliType"
+          }
+        },
+        "callerContext":{
+          "map_fieldname":{
+            "new_fieldname":"req_caller_id"
+          }
+        }
+      }
+    },
+    {
+      "filter":"grok",
+      "sort_order":2,
+      "source_field":"ugi",
+      "remove_source_field":"false",
+      "conditions":{
+        "fields":{
+          "type":[
+            "hdfs_audit"
+          ]
+        }
+      },
+      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
+      "post_map_values":{
+        "user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "x_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "p_user":{
+          "map_fieldname":{
+            "new_fieldname":"reqUser"
+          }
+        },
+        "k_user":{
+          "map_fieldname":{
+            "new_fieldname":"proxyUsers"
+          }
+        },
+        "p_authType":{
+          "map_fieldname":{
+            "new_fieldname":"authType"
+          }
+        },
+        "k_authType":{
+          "map_fieldname":{
+            "new_fieldname":"proxyAuthType"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
index 1298f1e..b2cdaa6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/package/scripts/kerberos_client.py
@@ -43,27 +43,6 @@
   def status(self, env):
     raise ClientComponentHasNoStatus()
 
-  def security_status(self, env):
-    import status_params
-    if status_params.security_enabled:
-      if status_params.smoke_user and status_params.smoke_user_keytab:
-        try:
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.smoke_user,
-                                status_params.smoke_user_keytab,
-                                status_params.smoke_user_principal,
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        self.put_structured_out({"securityState": "UNKNOWN"})
-        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def set_keytab(self, env):
     self.write_keytab_file()
 
diff --git a/ambari-server/src/main/resources/upgrade-pack.xsd b/ambari-server/src/main/resources/upgrade-pack.xsd
index 1f11aa1..aa7ddd8 100644
--- a/ambari-server/src/main/resources/upgrade-pack.xsd
+++ b/ambari-server/src/main/resources/upgrade-pack.xsd
@@ -276,6 +276,7 @@
       <xs:element name="summary" minOccurs="0" />
     </xs:sequence>
     <xs:attribute name="sequential" use="optional" type="xs:boolean" />
+    <xs:attribute name="timeout-config" use="optional" type="xs:string" />
   </xs:complexType>
   
   <xs:complexType name="restart-task">
diff --git a/ambari-server/src/main/sh/azuredb_create_generator.sh b/ambari-server/src/main/sh/azuredb_create_generator.sh
new file mode 100755
index 0000000..e233cf4
--- /dev/null
+++ b/ambari-server/src/main/sh/azuredb_create_generator.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Creates an idempotent SQL script for AzureDB from the SQLServer "create" script.
+
+sql_dir="$1"/src/main/resources
+script_dir="$1"/src/main/python
+
+[[ -e "$sql_dir"/Ambari-DDL-SQLServer-CREATE.sql ]] || exit 1
+[[ -x "$script_dir"/azuredb_create_generator.py ]] || exit 2
+
+cat "$sql_dir"/Ambari-DDL-SQLServer-CREATE.sql | "$script_dir"/azuredb_create_generator.py > "$sql_dir"/Ambari-DDL-AzureDB-CREATE.sql
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index 89ec32b..8165da5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -149,7 +149,7 @@
 
   private static void createTask(ActionDBAccessor db, long requestId, long stageId, String hostName, String clusterName) throws AmbariException {
 
-    Stage s = stageFactory.createNew(requestId, "/var/log", clusterName, 1L, "execution command wrapper test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/var/log", clusterName, 1L, "execution command wrapper test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostName, Role.NAMENODE,
         RoleCommand.START,
@@ -157,7 +157,7 @@
             hostName, System.currentTimeMillis()), clusterName, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
index 89627f7..214aee1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/StageTest.java
@@ -66,7 +66,7 @@
 
   @Test
   public void testAddServerActionCommand_userName() throws Exception {
-    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context", CLUSTER_HOST_INFO,
+    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context",
         "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     stage.addServerActionCommand(ConfigureAction.class.getName(),
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index c1056dd..75ad9ab 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@ -193,7 +193,7 @@
     List<Stage> stages = new ArrayList<>();
     stages.add(createStubStage(hostName, requestId, stageId));
     stages.add(createStubStage(hostName, requestId, stageId + 1));
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
     assertEquals(2, stages.size());
   }
@@ -539,7 +539,7 @@
   @Test
   public void testAbortRequest() throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
 
     clusters.addHost("host2");
@@ -576,7 +576,8 @@
     String hostName = cmd.getHostName();
     cmd.setStatus(HostRoleStatus.COMPLETED);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("clusterHostInfo");
     db.persistActions(request);
     db.abortOperation(requestId);
 
@@ -620,7 +621,7 @@
 
     stages.add(stage);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     // persist entities
     db.persistActions(request);
@@ -668,7 +669,7 @@
   @Test
   public void testGet1000TasksFromOracleDB() throws Exception {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     for (int i = 1000; i < 2002; i++) {
       String host = "host" + i;
@@ -681,7 +682,8 @@
 
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("clusterHostInfo");
     db.persistActions(request);
 
     List<HostRoleCommandEntity> entities =
@@ -709,7 +711,7 @@
     Stage s = createStubStage(hostname, requestId, stageId);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
   }
 
@@ -723,7 +725,7 @@
       stages.add(stage);
     }
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
     db.persistActions(request);
   }
 
@@ -733,7 +735,7 @@
     Stage s = createStubStage(hostname, requestId, stageId);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     s.setHostRoleStatus(hostname, Role.HBASE_REGIONSERVER.name(), HostRoleStatus.COMPLETED);
     s.setHostRoleStatus(hostname, Role.HBASE_MASTER.name(), HostRoleStatus.COMPLETED);
@@ -747,7 +749,7 @@
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
 
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
 
     s.setHostRoleStatus(hostname, Role.HBASE_REGIONSERVER.name(), HostRoleStatus.PENDING);
     s.setHostRoleStatus(hostname, Role.HBASE_MASTER.name(), HostRoleStatus.COMPLETED);
@@ -756,7 +758,7 @@
 
   private Stage createStubStage(String hostname, long requestId, long stageId) {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -774,7 +776,7 @@
   private void populateActionDBWithCustomAction(ActionDBAccessor db, String hostname,
                                 long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-      "", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.valueOf(actionName),
         RoleCommand.ACTIONEXECUTE,
@@ -785,20 +787,22 @@
     final RequestResourceFilter resourceFilter = new RequestResourceFilter("HBASE", "HBASE_MASTER", null);
     List<RequestResourceFilter> resourceFilters = new
       ArrayList<RequestResourceFilter>() {{ add(resourceFilter); }};
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("");
     db.persistActions(request);
   }
 
   private void populateActionDBWithServerAction(ActionDBAccessor db, String hostname,
                                                 long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-        "", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addServerActionCommand(serverActionName, null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.ACTIONEXECUTE, clusterName, null, null, "command details", null, 300, false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "", clusters);
+    request.setClusterHostInfo("");
     db.persistActions(request);
   }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
index e12461d..410de80 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
@@ -26,10 +26,12 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 
 import org.apache.ambari.server.AmbariException;
@@ -42,11 +44,16 @@
 import org.apache.ambari.server.events.publishers.JPAEventPublisher;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.StageDAO;
+import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
+import org.apache.ambari.server.orm.entities.StageEntity;
+import org.apache.ambari.server.orm.entities.StageEntityPK;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.utils.CommandUtils;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.collections.CollectionUtils;
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
@@ -213,7 +220,7 @@
   }
 
   private void populateActionDB(ActionDBAccessor db, String hostname) throws AmbariException {
-    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -221,12 +228,12 @@
             hostname, System.currentTimeMillis()), "cluster1", "HBASE", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 
   private void populateActionDBWithTwoCommands(ActionDBAccessor db, String hostname) throws AmbariException {
-    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+    Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
@@ -238,7 +245,7 @@
           hostname, System.currentTimeMillis()), "cluster1", "HBASE", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 
@@ -286,4 +293,34 @@
 
     verify(queue, db, clusters);
   }
+
+  /**
+   * Tests whether {@link ActionDBAccessor#persistActions(Request)} associates tasks with their
+   * stages.  Improvements to {@code Stage} processing exposed the fact that the association wasn't
+   * being made, and JPA didn't know of the Stage-to-Tasks child relationship.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testPersistCommandsWithStages() throws Exception {
+    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
+
+    populateActionDBWithTwoCommands(db, hostname);
+
+    List<Stage> stages = db.getAllStages(requestId);
+    assertEquals(1, stages.size());
+    Stage stage = stages.get(0);
+
+    StageEntityPK pk = new StageEntityPK();
+    pk.setRequestId(stage.getRequestId());
+    pk.setStageId(stage.getStageId());
+
+    StageDAO dao = injector.getInstance(StageDAO.class);
+    StageEntity stageEntity = dao.findByPK(pk);
+    assertNotNull(stageEntity);
+
+    Collection<HostRoleCommandEntity> commandEntities = stageEntity.getHostRoleCommands();
+    assertTrue(CollectionUtils.isNotEmpty(commandEntities));
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index b1a7524..869234b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -210,7 +210,7 @@
     ActionDBAccessor db = mock(ActionDBAccessorImpl.class);
     HostRoleCommandDAO hostRoleCommandDAOMock = mock(HostRoleCommandDAO.class);
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
-    Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     List<Stage> stages = Collections.singletonList(s);
@@ -219,8 +219,9 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     //Keep large number of attempts so that the task is not expired finally
     //Small action timeout to test rescheduling
     ActionScheduler scheduler = new ActionScheduler(100, 5, db, aq, fsm,
@@ -306,7 +308,7 @@
     hostEntity.setHostName(hostname);
     hostDAO.create(hostEntity);
 
-    final Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    final Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     s.addHostRoleExecutionCommand(hostname, Role.SECONDARY_NAMENODE, RoleCommand.INSTALL,
             new ServiceComponentHostInstallEvent("SECONDARY_NAMENODE", hostname, System.currentTimeMillis(), "HDP-1.2.0"),
@@ -322,6 +324,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     doAnswer(new Answer<Void>() {
@@ -395,7 +398,7 @@
     when(host.getState()).thenReturn(HostState.HEARTBEAT_LOST);
     when(host.getHostName()).thenReturn(hostname);
 
-    final Stage s = StageUtils.getATestStage(1, 977, hostname, CLUSTER_HOST_INFO,
+    final Stage s = StageUtils.getATestStage(1, 977, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     List<Stage> stages = Collections.singletonList(s);
@@ -483,7 +486,7 @@
     when(serviceObj.getCluster()).thenReturn(oneClusterMock);
 
     final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "stageWith2Tasks",
-      CLUSTER_HOST_INFO, "{\"command_param\":\"param_value\"}", "{\"host_param\":\"param_value\"}");
+      "{\"command_param\":\"param_value\"}", "{\"host_param\":\"param_value\"}");
     addInstallTaskToStage(stage, hostname1, "cluster1", Role.DATANODE,
       RoleCommand.INSTALL, Service.Type.HDFS, 1);
     addInstallTaskToStage(stage, hostname2, "cluster1", Role.NAMENODE,
@@ -613,6 +616,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -730,6 +734,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -773,6 +778,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -987,6 +993,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1053,7 +1060,7 @@
       String requestContext, int timeout, boolean stageSupportsAutoSkip,
       boolean autoSkipFailedTask) {
 
-    Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, requestContext, CLUSTER_HOST_INFO,
+    Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1", 1L, requestContext,
       "{}", "{}");
 
     stage.setStageId(stageId);
@@ -1141,6 +1148,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(firstStageInProgressPerRequest.size());
@@ -1232,6 +1240,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1309,6 +1318,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1374,6 +1384,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stages.size());
@@ -1520,7 +1531,7 @@
 
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L,
-        "testRequestFailureBasedOnSuccessFactor", CLUSTER_HOST_INFO, "", "");
+        "testRequestFailureBasedOnSuccessFactor", "", "");
     stage.setStageId(1);
 
     addHostRoleExecutionCommand(now, stage, Role.SQOOP, Service.Type.SQOOP,
@@ -1720,7 +1731,7 @@
 
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "testRequestFailureBasedOnSuccessFactor",
-      CLUSTER_HOST_INFO, "", "");
+      "", "");
     stage.setStageId(1);
     stage.addHostRoleExecutionCommand("host1", Role.DATANODE, RoleCommand.UPGRADE,
         new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(), "host1", now, "HDP-0.2"),
@@ -1871,7 +1882,7 @@
 
   private Stage createStage(String clusterName, int stageId, int requestId) {
     Stage stage = stageFactory.createNew(requestId, "/tmp", clusterName, 1L, "getStageWithSingleTask",
-      CLUSTER_HOST_INFO, "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
+      "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     stage.setStageId(stageId);
     return stage;
   }
@@ -1971,7 +1982,6 @@
 
     //Data for stages
     Map<String, Set<String>> clusterHostInfo1 = StageUtils.getGson().fromJson(CLUSTER_HOST_INFO, type);
-    Map<String, Set<String>> clusterHostInfo2 = StageUtils.getGson().fromJson(CLUSTER_HOST_INFO_UPDATED, type);
     int stageId = 1;
     int requestId1 = 1;
     int requestId2 = 2;
@@ -2006,11 +2016,12 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
-    Stage s1 = StageUtils.getATestStage(requestId1, stageId, hostname, CLUSTER_HOST_INFO,
+    Stage s1 = StageUtils.getATestStage(requestId1, stageId, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
-    Stage s2 = StageUtils.getATestStage(requestId2, stageId, hostname, CLUSTER_HOST_INFO_UPDATED,
+    Stage s2 = StageUtils.getATestStage(requestId2, stageId, hostname,
       "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     when(db.getCommandsInProgressCount()).thenReturn(1);
@@ -2037,7 +2048,7 @@
     ac = waitForQueueSize(hostname, aq, 1, scheduler);
     assertTrue(ac.get(0) instanceof ExecutionCommand);
     assertEquals(String.valueOf(requestId2) + "-" + stageId, ((ExecutionCommand) (ac.get(0))).getCommandId());
-    assertEquals(clusterHostInfo2, ((ExecutionCommand) (ac.get(0))).getClusterHostInfo());
+    assertEquals(clusterHostInfo1, ((ExecutionCommand) (ac.get(0))).getClusterHostInfo());
   }
 
 
@@ -2087,7 +2098,7 @@
     when(serviceObj.getCluster()).thenReturn(oneClusterMock);
 
     Stage stage1 = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "stageWith2Tasks",
-            CLUSTER_HOST_INFO, "", "");
+            "", "");
     addInstallTaskToStage(stage1, hostname1, "cluster1", Role.HBASE_MASTER,
             RoleCommand.INSTALL, Service.Type.HBASE, 1);
     addInstallTaskToStage(stage1, hostname1, "cluster1", Role.HBASE_REGIONSERVER,
@@ -2166,6 +2177,7 @@
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
 
     RequestEntity request = mock(RequestEntity.class);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(request.isExclusive()).thenReturn(false);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
@@ -2319,6 +2331,7 @@
 
     RequestEntity request = mock(RequestEntity.class);
     when(request.isExclusive()).thenReturn(false);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
     when(db.getCommandsInProgressCount()).thenReturn(stagesInProgress.size());
@@ -2569,10 +2582,13 @@
 
     RequestEntity request1 = mock(RequestEntity.class);
     when(request1.isExclusive()).thenReturn(false);
+    when(request1.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     RequestEntity request2 = mock(RequestEntity.class);
     when(request2.isExclusive()).thenReturn(true);
+    when(request2.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     RequestEntity request3 = mock(RequestEntity.class);
     when(request3.isExclusive()).thenReturn(false);
+    when(request3.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
 
     when(db.getRequestEntity(requestId1)).thenReturn(request1);
     when(db.getRequestEntity(requestId2)).thenReturn(request2);
@@ -2764,6 +2780,7 @@
     Mockito.doNothing().when(hostRoleCommandDAOMock).publishTaskCreateEvent(anyListOf(HostRoleCommand.class));
 
     RequestEntity request = mock(RequestEntity.class);
+    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
     when(request.isExclusive()).thenReturn(false);
     when(db.getRequestEntity(anyLong())).thenReturn(request);
 
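Note: since the scheduler now reads the host topology from the request rather
than the stage, every mocked RequestEntity in this file gains the same stub,
and the two-request test asserts clusterHostInfo1 for both execution commands
(the per-stage CLUSTER_HOST_INFO_UPDATED variant is no longer reachable).
A sketch of the repeated mock setup as it appears after the merge:

    RequestEntity request = mock(RequestEntity.class);
    when(request.isExclusive()).thenReturn(false);
    // assumed new read path: the ExecutionCommand's cluster host info
    // comes from the request entity
    when(request.getClusterHostInfo()).thenReturn(CLUSTER_HOST_INFO);
    when(db.getRequestEntity(anyLong())).thenReturn(request);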
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
index b76e41e..82db6e1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
@@ -60,7 +60,7 @@
 
   @Test
   public void testTaskTimeout() {
-    Stage s = StageUtils.getATestStage(1, 1, "h1", CLUSTER_HOST_INFO, "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
+    Stage s = StageUtils.getATestStage(1, 1, "h1", "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
     s.addHostRoleExecutionCommand("h1", Role.DATANODE, RoleCommand.INSTALL,
         null, "c1", "HDFS", false, false);
     s.addHostRoleExecutionCommand("h1", Role.HBASE_MASTER, RoleCommand.INSTALL,
@@ -75,9 +75,8 @@
 
   @Test
   public void testGetRequestContext() {
-    Stage stage = stageFactory.createNew(1, "/logDir", "c1", 1L, "My Context", CLUSTER_HOST_INFO, "", "");
+    Stage stage = stageFactory.createNew(1, "/logDir", "c1", 1L, "My Context", "", "");
     assertEquals("My Context", stage.getRequestContext());
-    assertEquals(CLUSTER_HOST_INFO, stage.getClusterHostInfo());
   }
 
   @After
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index cff0e34..ceda927 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -495,7 +495,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.SECURED_KERBEROS.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
     ComponentStatus componentStatus2 = new ComponentStatus();
@@ -503,7 +502,6 @@
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.STARTED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(SECONDARY_NAMENODE);
     componentStatuses.add(componentStatus2);
     hb.setComponentStatus(componentStatuses);
@@ -526,7 +524,6 @@
     State componentState2 = serviceComponentHost2.getState();
     State componentState3 = serviceComponentHost3.getState();
     assertEquals(State.STARTED, componentState1);
-    assertEquals(SecurityState.SECURED_KERBEROS, serviceComponentHost1.getSecurityState());
     assertEquals(State.INSTALLED, componentState2);
     assertEquals(SecurityState.SECURING, serviceComponentHost2.getSecurityState());
     //starting state will not be overridden by status command
@@ -837,7 +834,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
 
     componentStatus1.setExtra(extra);
@@ -873,7 +869,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     hb.setComponentStatus(Collections.singletonList(componentStatus1));
 
@@ -992,7 +987,7 @@
     serviceComponentHost2.setStackVersion(stack120);
 
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test",
-        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.UPGRADE,
         new ServiceComponentHostUpgradeEvent(Role.DATANODE.toString(),
@@ -1004,7 +999,7 @@
         DummyCluster, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     actionDBAccessor.persistActions(request);
     CommandReport cr = new CommandReport();
     cr.setActionId(StageUtils.getActionId(requestId, stageId));
@@ -1328,7 +1323,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.INSTALLED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
 
@@ -1337,7 +1331,6 @@
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(NAMENODE);
     componentStatuses.add(componentStatus2);
 
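Note: the heartbeat tests drop every setSecurityState call, which suggests
agents no longer report a per-component security state in ComponentStatus.
A status payload in these tests now reduces to the following sketch (field
set inferred from the hunks; nothing beyond them is assumed):

    ComponentStatus status = new ComponentStatus();
    status.setClusterName(DummyCluster);
    status.setServiceName(HDFS);
    status.setComponentName(DATANODE);
    status.setStatus(State.STARTED.name()); // no security state field any more
    componentStatuses.add(status);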
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 2e65e8d..a13053c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -223,7 +223,7 @@
 
   public void populateActionDB(ActionDBAccessor db, String DummyHostname1, long requestId, long stageId) throws AmbariException {
     Stage s = stageFactory.createNew(requestId, "/a/b", DummyCluster, 1L, "heartbeat handler test",
-        "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+        "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
     String filename = null;
     s.addHostRoleExecutionCommand(DummyHostname1, Role.HBASE_MASTER,
@@ -232,7 +232,7 @@
             DummyHostname1, System.currentTimeMillis()), DummyCluster, HBASE, false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     db.persistActions(request);
   }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 80775c3..5ced924 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -316,7 +316,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
 
@@ -325,7 +324,6 @@
     componentStatus2.setServiceName(HDFS);
     componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus2.setComponentName(NAMENODE);
     componentStatuses.add(componentStatus2);
 
@@ -866,7 +864,7 @@
     serviceComponentHost1.setState(State.INSTALLING);
 
     Stage s = stageFactory.createNew(1, "/a/b", "cluster1", 1L, "action manager test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     s.setStageId(1);
     s.addHostRoleExecutionCommand(DummyHostname1, Role.DATANODE, RoleCommand.INSTALL,
       new ServiceComponentHostInstallEvent(Role.DATANODE.toString(),
@@ -874,7 +872,7 @@
           DummyCluster, "HDFS", false, false);
     List<Stage> stages = new ArrayList<>();
     stages.add(s);
-    Request request = new Request(stages, clusters);
+    Request request = new Request(stages, "clusterHostInfo", clusters);
     actionDBAccessor.persistActions(request);
     actionDBAccessor.abortHostRole(DummyHostname1, 1, 1, Role.DATANODE.name());
 
@@ -1128,14 +1126,12 @@
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("STARTED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     ComponentStatus nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb1.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb1);
@@ -1153,14 +1149,12 @@
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb2.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2);
@@ -1180,14 +1174,12 @@
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("STARTED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb2a.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb2a);
@@ -1208,14 +1200,12 @@
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("INSTALLED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     nameNodeStatus = new ComponentStatus();
     nameNodeStatus.setClusterName(cluster.getClusterName());
     nameNodeStatus.setServiceName(HDFS);
     nameNodeStatus.setComponentName(NAMENODE);
     nameNodeStatus.setStatus("INSTALLED");
-    nameNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(nameNodeStatus);
     hb3.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb3);
@@ -1247,7 +1237,6 @@
     dataNodeStatus.setServiceName(HDFS);
     dataNodeStatus.setComponentName(DATANODE);
     dataNodeStatus.setStatus("STARTED");
-    dataNodeStatus.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus.add(dataNodeStatus);
     hb4.setComponentStatus(componentStatus);
     handler.handleHeartBeat(hb4);
@@ -1405,7 +1394,6 @@
     componentStatus1.setServiceName(serviceName);
     componentStatus1.setMessage(message);
     componentStatus1.setStatus(state.name());
-    componentStatus1.setSecurityState(securityState.name());
     componentStatus1.setComponentName(componentName);
     componentStatus1.setStackVersion(stackVersion);
     return componentStatus1;
@@ -1434,7 +1422,6 @@
     componentStatus1.setServiceName(HDFS);
     componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setSecurityState(SecurityState.UNSECURED.name());
     componentStatus1.setComponentName(DATANODE);
 
     componentStatuses.add(componentStatus1);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
new file mode 100644
index 0000000..98f6f44
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
@@ -0,0 +1,362 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.alerts;
+
+import static junit.framework.Assert.assertEquals;
+import static org.easymock.EasyMock.expect;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.events.AlertEvent;
+import org.apache.ambari.server.events.AlertReceivedEvent;
+import org.apache.ambari.server.events.MockEventListener;
+import org.apache.ambari.server.events.publishers.AlertEventPublisher;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.eventbus.EventBus;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+
+/**
+ * Tests {@link ComponentVersionAlertRunnable}.
+ */
+public class ComponentVersionAlertRunnableTest extends EasyMockSupport {
+
+  private final static long CLUSTER_ID = 1;
+  private final static String CLUSTER_NAME = "c1";
+  private final static String HOSTNAME_1 = "c6401.ambari.apache.org";
+  private final static String HOSTNAME_2 = "c6402.ambari.apache.org";
+
+  private final static String EXPECTED_VERSION = "2.6.0.0-1234";
+  private final static String WRONG_VERSION = "9.9.9.9-9999";
+
+  private final static String DEFINITION_NAME = "ambari_server_component_version";
+  private final static String DEFINITION_SERVICE = "AMBARI";
+  private final static String DEFINITION_COMPONENT = "AMBARI_SERVER";
+  private final static String DEFINITION_LABEL = "Mock Definition";
+
+  private Clusters m_clusters;
+  private Cluster m_cluster;
+  private Injector m_injector;
+  private AlertDefinitionDAO m_definitionDao;
+  private AlertDefinitionEntity m_definition;
+  private MockEventListener m_listener;
+  private AmbariMetaInfo m_metaInfo;
+
+  private AlertEventPublisher m_eventPublisher;
+  private EventBus m_synchronizedBus;
+
+  private Collection<Host> m_hosts;
+  private Map<String, List<ServiceComponentHost>> m_hostComponentMap = new HashMap<>();
+  private StackId m_desiredStackId;
+
+  /**
+   * Sets up the mocked cluster, hosts, components, and alert definition shared by the tests.
+   */
+  @Before
+  public void setup() throws Exception {
+    m_injector = Guice.createInjector(new MockModule());
+    m_definitionDao = m_injector.getInstance(AlertDefinitionDAO.class);
+    m_clusters = m_injector.getInstance(Clusters.class);
+    m_cluster = m_injector.getInstance(Cluster.class);
+    m_eventPublisher = m_injector.getInstance(AlertEventPublisher.class);
+    m_listener = m_injector.getInstance(MockEventListener.class);
+    m_definition = createNiceMock(AlertDefinitionEntity.class);
+    m_metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+    // !!! need a synchronous op for testing
+    m_synchronizedBus = new EventBus();
+    Field field = AlertEventPublisher.class.getDeclaredField("m_eventBus");
+    field.setAccessible(true);
+    field.set(m_eventPublisher, m_synchronizedBus);
+
+    // register mock listener
+    m_synchronizedBus.register(m_listener);
+
+    // create the cluster map
+    Map<String,Cluster> clusterMap = new HashMap<>();
+    clusterMap.put(CLUSTER_NAME, m_cluster);
+
+    // hosts
+    m_hosts = new ArrayList<>();
+    Host host1 = createNiceMock(Host.class);
+    Host host2 = createNiceMock(Host.class);
+    expect(host1.getHostName()).andReturn(HOSTNAME_1).atLeastOnce();
+    expect(host2.getHostName()).andReturn(HOSTNAME_2).atLeastOnce();
+    m_hosts.add(host1);
+    m_hosts.add(host2);
+
+    m_hostComponentMap.put(HOSTNAME_1, new ArrayList<ServiceComponentHost>());
+    m_hostComponentMap.put(HOSTNAME_2, new ArrayList<ServiceComponentHost>());
+
+    // desired stack
+    m_desiredStackId = createNiceMock(StackId.class);
+    expect(m_desiredStackId.getStackName()).andReturn("SOME-STACK").atLeastOnce();
+    expect(m_desiredStackId.getStackVersion()).andReturn("STACK-VERSION").atLeastOnce();
+
+    // components
+    ServiceComponentHost sch1_1 = createNiceMock(ServiceComponentHost.class);
+    ServiceComponentHost sch1_2 = createNiceMock(ServiceComponentHost.class);
+    ServiceComponentHost sch2_1 = createNiceMock(ServiceComponentHost.class);
+    ServiceComponentHost sch2_2 = createNiceMock(ServiceComponentHost.class);
+
+    expect(sch1_1.getServiceName()).andReturn("FOO").atLeastOnce();
+    expect(sch1_1.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
+    expect(sch1_1.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
+    expect(sch1_1.getDesiredStackVersion()).andReturn(m_desiredStackId).atLeastOnce();
+    expect(sch1_2.getServiceName()).andReturn("BAR").atLeastOnce();
+    expect(sch1_2.getServiceComponentName()).andReturn("BAR_COMPONENT").atLeastOnce();
+    expect(sch1_2.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
+    expect(sch1_2.getDesiredStackVersion()).andReturn(m_desiredStackId).atLeastOnce();
+    expect(sch2_1.getServiceName()).andReturn("FOO").atLeastOnce();
+    expect(sch2_1.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
+    expect(sch2_1.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
+    expect(sch2_1.getDesiredStackVersion()).andReturn(m_desiredStackId).atLeastOnce();
+    expect(sch2_2.getServiceName()).andReturn("BAZ").atLeastOnce();
+    expect(sch2_2.getServiceComponentName()).andReturn("BAZ_COMPONENT").atLeastOnce();
+    expect(sch2_2.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
+    expect(sch2_2.getDesiredStackVersion()).andReturn(m_desiredStackId).atLeastOnce();
+
+    m_hostComponentMap.get(HOSTNAME_1).add(sch1_1);
+    m_hostComponentMap.get(HOSTNAME_1).add(sch1_2);
+    m_hostComponentMap.get(HOSTNAME_2).add(sch2_1);
+    m_hostComponentMap.get(HOSTNAME_2).add(sch2_2);
+
+    // mock the definition for the alert
+    expect(m_definition.getDefinitionName()).andReturn(DEFINITION_NAME).atLeastOnce();
+    expect(m_definition.getServiceName()).andReturn(DEFINITION_SERVICE).atLeastOnce();
+    expect(m_definition.getComponentName()).andReturn(DEFINITION_COMPONENT).atLeastOnce();
+    expect(m_definition.getLabel()).andReturn(DEFINITION_LABEL).atLeastOnce();
+    expect(m_definition.getEnabled()).andReturn(true).atLeastOnce();
+
+    // mock the cluster
+    expect(m_cluster.getClusterId()).andReturn(CLUSTER_ID).atLeastOnce();
+    expect(m_cluster.getClusterName()).andReturn(CLUSTER_NAME).atLeastOnce();
+    expect(m_cluster.getHosts()).andReturn(m_hosts).atLeastOnce();
+
+    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
+    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+    expect(clusterVersionEntity.getRepositoryVersion()).andReturn(
+        repositoryVersionEntity).anyTimes();
+
+    expect(repositoryVersionEntity.getVersion()).andReturn(EXPECTED_VERSION).anyTimes();
+    expect(m_cluster.getCurrentClusterVersion()).andReturn(clusterVersionEntity).anyTimes();
+
+    // mock clusters
+    expect(m_clusters.getClusters()).andReturn(clusterMap).atLeastOnce();
+
+    // mock the definition DAO
+    expect(m_definitionDao.findByName(CLUSTER_ID, DEFINITION_NAME)).andReturn(
+        m_definition).atLeastOnce();
+
+    m_metaInfo.init();
+    EasyMock.expectLastCall().anyTimes();
+
+    // expect the cluster host mapping
+    expect(m_cluster.getServiceComponentHosts(HOSTNAME_1)).andReturn(
+        m_hostComponentMap.get(HOSTNAME_1)).once();
+    expect(m_cluster.getServiceComponentHosts(HOSTNAME_2)).andReturn(
+        m_hostComponentMap.get(HOSTNAME_2)).once();
+
+    // expect the component from metainfo
+    ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
+    expect(componentInfo.isVersionAdvertised()).andReturn(true).atLeastOnce();
+    expect(m_metaInfo.getComponent(EasyMock.anyString(), EasyMock.anyString(), EasyMock.anyString(),
+        EasyMock.anyString())).andReturn(componentInfo).atLeastOnce();
+  }
+
+  /**
+   * @throws Exception
+   */
+  @After
+  public void teardown() throws Exception {
+  }
+
+  /**
+   * Tests that the alert is SKIPPED when there is an upgrade in progress.
+   */
+  @Test
+  public void testUpgradeInProgress() throws Exception {
+    UpgradeEntity upgrade = createNiceMock(UpgradeEntity.class);
+    expect(upgrade.getToVersion()).andReturn("VERSION").once();
+    expect(m_cluster.getUpgradeInProgress()).andReturn(upgrade).once();
+
+    replayAll();
+
+    m_metaInfo.init();
+
+    // precondition that no events were fired
+    assertEquals(0, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    // instantiate and inject mocks
+    ComponentVersionAlertRunnable runnable = new ComponentVersionAlertRunnable(
+        m_definition.getDefinitionName());
+
+    m_injector.injectMembers(runnable);
+
+    // run the alert
+    runnable.run();
+
+    assertEquals(1, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    List<AlertEvent> events = m_listener.getAlertEventInstances(AlertReceivedEvent.class);
+    assertEquals(1, events.size());
+
+    AlertReceivedEvent event = (AlertReceivedEvent) events.get(0);
+    Alert alert = event.getAlert();
+    assertEquals("AMBARI", alert.getService());
+    assertEquals("AMBARI_SERVER", alert.getComponent());
+    assertEquals(AlertState.SKIPPED, alert.getState());
+    assertEquals(DEFINITION_NAME, alert.getName());
+  }
+
+  /**
+   * Tests that an OK alert is fired when all components report the
+   * expected version.
+   */
+  @Test
+  public void testAllComponentVersionsCorrect() throws Exception {
+    replayAll();
+
+    m_metaInfo.init();
+
+    // precondition that no events were fired
+    assertEquals(0, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    // instantiate and inject mocks
+    ComponentVersionAlertRunnable runnable = new ComponentVersionAlertRunnable(
+        m_definition.getDefinitionName());
+
+    m_injector.injectMembers(runnable);
+
+    // run the alert
+    runnable.run();
+
+    assertEquals(1, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    List<AlertEvent> events = m_listener.getAlertEventInstances(AlertReceivedEvent.class);
+    assertEquals(1, events.size());
+
+    AlertReceivedEvent event = (AlertReceivedEvent) events.get(0);
+    Alert alert = event.getAlert();
+    assertEquals("AMBARI", alert.getService());
+    assertEquals("AMBARI_SERVER", alert.getComponent());
+    assertEquals(AlertState.OK, alert.getState());
+    assertEquals(DEFINITION_NAME, alert.getName());
+
+    verifyAll();
+  }
+
+  /**
+   * Tests that a WARNING alert is fired when a component reports a mismatched version.
+   */
+  @Test
+  public void testComponentVersionMismatch() throws Exception {
+    // reset expectation so that it returns a wrong version
+    ServiceComponentHost sch = m_hostComponentMap.get(HOSTNAME_1).get(0);
+    EasyMock.reset(sch);
+    expect(sch.getServiceName()).andReturn("FOO").atLeastOnce();
+    expect(sch.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
+    expect(sch.getVersion()).andReturn(WRONG_VERSION).atLeastOnce();
+    expect(sch.getDesiredStackVersion()).andReturn(m_desiredStackId).atLeastOnce();
+
+    replayAll();
+
+    m_metaInfo.init();
+
+    // precondition that no events were fired
+    assertEquals(0, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    // instantiate and inject mocks
+    ComponentVersionAlertRunnable runnable = new ComponentVersionAlertRunnable(
+        m_definition.getDefinitionName());
+
+    m_injector.injectMembers(runnable);
+
+    // run the alert
+    runnable.run();
+
+    assertEquals(1, m_listener.getAlertEventReceivedCount(AlertReceivedEvent.class));
+
+    List<AlertEvent> events = m_listener.getAlertEventInstances(AlertReceivedEvent.class);
+    assertEquals(1, events.size());
+
+    AlertReceivedEvent event = (AlertReceivedEvent) events.get(0);
+    Alert alert = event.getAlert();
+    assertEquals("AMBARI", alert.getService());
+    assertEquals("AMBARI_SERVER", alert.getComponent());
+    assertEquals(AlertState.WARNING, alert.getState());
+    assertEquals(DEFINITION_NAME, alert.getName());
+
+    verifyAll();
+  }
+
+
+  /**
+   * Guice module that binds the mocked dependencies injected into the runnable under test.
+   */
+  private class MockModule implements Module {
+    /**
+     *
+     */
+    @Override
+    public void configure(Binder binder) {
+      Cluster cluster = createNiceMock(Cluster.class);
+
+      binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
+      binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+      binder.bind(Cluster.class).toInstance(cluster);
+      binder.bind(AlertDefinitionDAO.class).toInstance(createNiceMock(AlertDefinitionDAO.class));
+      binder.bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
+      binder.bind(AmbariMetaInfo.class).toInstance(createNiceMock(AmbariMetaInfo.class));
+      binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+    }
+  }
+}
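Note: judging from the expectations in this new test, ComponentVersionAlertRunnable
compares each host component's reported version with the cluster's current
repository version, producing OK on a full match, WARNING on any mismatch, and
SKIPPED while an upgrade is in progress. A hypothetical condensation of that
loop (variable names are the editor's, not the runnable's):

    String expected = cluster.getCurrentClusterVersion()
        .getRepositoryVersion().getVersion();
    AlertState state = AlertState.OK;
    for (Host host : cluster.getHosts()) {
      for (ServiceComponentHost sch : cluster.getServiceComponentHosts(host.getHostName())) {
        if (!expected.equals(sch.getVersion())) {
          state = AlertState.WARNING; // at least one component is off-version
        }
      }
    }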
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
index 394de9f..ec60966 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/JpaSortBuilderTest.java
@@ -126,10 +126,6 @@
     Assert.assertEquals(1, roots.size());
   }
 
-  /**
-   * The {@link HistoryPredicateVisitor} is used to convert an Ambari
-   * {@link Predicate} into a JPA {@link javax.persistence.criteria.Predicate}.
-   */
   private final class MockAlertHistoryredicateVisitor
       extends JpaPredicateVisitor<AlertHistoryEntity> {
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
index 13db5f8..95bceb8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
@@ -52,6 +52,11 @@
 import org.apache.ambari.server.api.services.ResultImpl;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.api.util.TreeNodeImpl;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.KerberosHelperImpl;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import org.apache.ambari.server.controller.internal.ClusterControllerImpl;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
@@ -61,8 +66,12 @@
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.cluster.ClustersImpl;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.topology.AmbariContext;
 import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
@@ -85,12 +94,19 @@
  */
 @SuppressWarnings("unchecked")
 @RunWith(PowerMockRunner.class)
-@PrepareForTest(AmbariContext.class)
+@PrepareForTest({AmbariContext.class, AmbariServer.class})
 public class ClusterBlueprintRendererTest {
 
   private static final ClusterTopology topology = createNiceMock(ClusterTopology.class);
   private static final ClusterController clusterController = createNiceMock(ClusterControllerImpl.class);
 
+  private static final AmbariContext ambariContext = createNiceMock(AmbariContext.class);
+  private static final Cluster cluster = createNiceMock(Cluster.class);
+  private static final Clusters clusters = createNiceMock(ClustersImpl.class);
+  private static final AmbariManagementController controller = createNiceMock(AmbariManagementControllerImpl.class);
+  private static final KerberosHelper kerberosHelper = createNiceMock(KerberosHelperImpl.class);
+  private static final KerberosDescriptor kerberosDescriptor = createNiceMock(KerberosDescriptor.class);
+
   private static final Blueprint blueprint = createNiceMock(Blueprint.class);
   private static final Stack stack = createNiceMock(Stack.class);
   private static final HostGroup group1 = createNiceMock(HostGroup.class);
@@ -155,7 +171,20 @@
     expect(group1.getComponents()).andReturn(group1Components).anyTimes();
     expect(group2.getComponents()).andReturn(group2Components).anyTimes();
 
-    replay(topology, blueprint, stack, group1, group2);
+    expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
+    expect(topology.getClusterId()).andReturn(1L).anyTimes();
+    PowerMock.mockStatic(AmbariServer.class);
+    expect(AmbariServer.getController()).andReturn(controller).anyTimes();
+    PowerMock.replay(AmbariServer.class);
+    expect(clusters.getCluster("clusterName")).andReturn(cluster).anyTimes();
+    expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
+    Set<String> properties = new HashSet<String>();
+    properties.add("core-site/hadoop.security.auth_to_local");
+    expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes();
+    expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes();
+    replay(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor);
   }
 
   private void setupMocksForKerberosEnabledCluster() throws Exception {
@@ -165,6 +194,7 @@
 
     PowerMock.mockStatic(AmbariContext.class);
     expect(AmbariContext.getClusterController()).andReturn(clusterController).anyTimes();
+    expect(AmbariContext.getController()).andReturn(controller).anyTimes();
 
     reset(topology);
 
@@ -210,8 +240,8 @@
 
   @After
   public void tearDown() {
-    verify(topology, blueprint, stack, group1, group2);
-    reset(topology, blueprint, stack, group1, group2);
+    verify(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor);
+    reset(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor);
   }
 
   @Test
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
index e076268..8e83f56 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
@@ -141,7 +141,7 @@
       resource.getRenderer("foo");
       fail("Should have thrown an exception due to invalid renderer type");
     } catch (IllegalArgumentException e) {
-      // expected
+      assertEquals("Invalid renderer name for resource of type Service", e.getMessage());
     }
   }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 884777d..c9acfe9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1942,7 +1942,7 @@
 
     AlertDefinitionDAO dao = injector.getInstance(AlertDefinitionDAO.class);
     List<AlertDefinitionEntity> definitions = dao.findAll(clusterId);
-    assertEquals(12, definitions.size());
+    assertEquals(13, definitions.size());
 
     // figure out how many of these alerts were merged into from the
     // non-stack alerts.json
@@ -1955,7 +1955,7 @@
     }
 
     assertEquals(3, hostAlertCount);
-    assertEquals(9, definitions.size() - hostAlertCount);
+    assertEquals(10, definitions.size() - hostAlertCount);
 
     for (AlertDefinitionEntity definition : definitions) {
       definition.setScheduleInterval(28);
@@ -1965,7 +1965,7 @@
     metaInfo.reconcileAlertDefinitions(clusters);
 
     definitions = dao.findAll();
-    assertEquals(12, definitions.size());
+    assertEquals(13, definitions.size());
 
     for (AlertDefinitionEntity definition : definitions) {
       assertEquals(28, definition.getScheduleInterval().intValue());
@@ -1974,7 +1974,7 @@
     // find all enabled for the cluster should find 6 (the ones from HDFS;
     // it will not find the agent alert since it's not bound to the cluster)
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(11, definitions.size());
+    assertEquals(12, definitions.size());
 
     // create new definition
     AlertDefinitionEntity entity = new AlertDefinitionEntity();
@@ -1993,19 +1993,19 @@
 
     // verify the new definition is found (6 HDFS + 1 new one)
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(12, definitions.size());
+    assertEquals(13, definitions.size());
 
     // reconcile, which should disable our bad definition
     metaInfo.reconcileAlertDefinitions(clusters);
 
     // find all enabled for the cluster should find 6
     definitions = dao.findAllEnabled(cluster.getClusterId());
-    assertEquals(11, definitions.size());
+    assertEquals(12, definitions.size());
 
     // find all should find 6 HDFS + 1 disabled + 1 agent alert + 2 server
     // alerts
     definitions = dao.findAll();
-    assertEquals(13, definitions.size());
+    assertEquals(14, definitions.size());
 
     entity = dao.findById(entity.getDefinitionId());
     assertFalse(entity.getEnabled());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
index 3febd48..bc82999 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java
@@ -87,6 +87,7 @@
     expect(stack.getConfiguration(Arrays.asList("HDFS", "YARN", "HIVE"))).andReturn(createStackDefaults()).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
     expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes();
     expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
     expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse());
     expect(configuration.getFullProperties()).andReturn(props).anyTimes();
@@ -114,13 +115,14 @@
     expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
     expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
-    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY).anyTimes();
+    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(stack.getVersion()).andReturn("2.3").anyTimes();
     expect(stack.getName()).andReturn("HDP").anyTimes();
     expect(stack.getConfiguration(Arrays.asList("HDFS", "YARN", "HIVE"))).andReturn(createStackDefaults()).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
     expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes();
     expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
     expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse());
     expect(configuration.getFullProperties()).andReturn(props).anyTimes();
@@ -149,13 +151,14 @@
     expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes();
     expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes();
     expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes();
-    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY).anyTimes();
+    expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(stack.getVersion()).andReturn("2.3").anyTimes();
     expect(stack.getName()).andReturn("HDP").anyTimes();
     expect(stack.getConfiguration(Arrays.asList("HDFS", "YARN", "HIVE"))).andReturn(createStackDefaults()).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
     expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes();
     expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
     expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse());
     expect(configuration.getFullProperties()).andReturn(props).anyTimes();
@@ -165,9 +168,7 @@
     underTest.adviseConfiguration(clusterTopology, props);
     // THEN
     assertTrue(advisedConfigurations.get("core-site").getProperties().containsKey("dummyKey1"));
-    assertTrue(advisedConfigurations.get("core-site").getProperties().containsKey("dummyKey3"));
     assertTrue(advisedConfigurations.get("core-site").getPropertyValueAttributes().containsKey("dummyKey2"));
-    assertTrue(advisedConfigurations.get("core-site").getPropertyValueAttributes().containsKey("dummyKey3"));
     assertEquals("dummyValue", advisedConfigurations.get("core-site").getProperties().get("dummyKey1"));
     assertEquals(Boolean.toString(true), advisedConfigurations.get("core-site")
       .getPropertyValueAttributes().get("dummyKey2").getDelete());
@@ -191,6 +192,7 @@
     expect(stack.getConfiguration(Arrays.asList("HDFS", "YARN", "HIVE"))).andReturn(createStackDefaults()).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("HDFS", "YARN", "HIVE")).anyTimes();
     expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes();
+    expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes();
     expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes();
     expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse());
     expect(configuration.getFullProperties()).andReturn(props).anyTimes();
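Note: the added blueprint.isValidConfigType("core-site") expectations, together
with the dropped dummyKey3 assertions, suggest the processor now discards
advised properties whose config type the blueprint does not declare. A hedged
sketch of that filter (placement and surrounding names assumed):

    // keep a recommendation only if the blueprint knows the config type
    if (blueprint.isValidConfigType(configType)) {
      advisedConfigurations.put(configType, advisedConfig);
    }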
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
index a96ca6c..01cda02 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/AbstractCheckDescriptorTest.java
@@ -20,8 +20,11 @@
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
 
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -29,24 +32,29 @@
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.PrereqCheckType;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.easymock.EasyMock;
+import org.junit.Assert;
 import org.junit.Test;
 
 import com.google.inject.Provider;
 
-import junit.framework.Assert;
-
 /**
  * Unit tests for AbstractCheckDescriptor
  */
 public class AbstractCheckDescriptorTest {
   final private Clusters clusters = EasyMock.createNiceMock(Clusters.class);
+  private final RepositoryVersionDAO repositoryVersionDao = EasyMock.createNiceMock(RepositoryVersionDAO.class);
 
   @UpgradeCheck(
       group = UpgradeCheckGroup.DEFAULT,
@@ -65,6 +73,13 @@
           return clusters;
         }
       };
+
+      repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
+        @Override
+        public RepositoryVersionDAO get() {
+          return repositoryVersionDao;
+        }
+      };
     }
 
     @Override
@@ -170,20 +185,14 @@
     AbstractCheckDescriptor check = new TestCheckImpl(PrereqCheckType.SERVICE);
     PrereqCheckRequest request = new PrereqCheckRequest(clusterName, UpgradeType.ROLLING);
 
-    List<String> oneServiceList = new ArrayList<String>() {{
-      add("SERVICE1");
-    }};
-    List<String> atLeastOneServiceList = new ArrayList<String>() {{
-      add("SERVICE1");
-      add("NON_EXISTED_SERVICE");
-    }};
-    List<String> allServicesList = new ArrayList<String>(){{
-      add("SERVICE1");
-      add("SERVICE2");
-    }};
-    List<String> nonExistedList = new ArrayList<String>(){{
-      add("NON_EXISTED_SERVICE");
-    }};
+    List<String> oneServiceList = Arrays.asList("SERVICE1");
+
+    List<String> atLeastOneServiceList = Arrays.asList("SERVICE1", "NON_EXISTED_SERVICE");
+
+    List<String> allServicesList = Arrays.asList("SERVICE1", "SERVICE2");
+
+    List<String> nonExistedList = Arrays.asList("NON_EXISTED_SERVICE");
+
 
     // case, where we need at least one service to be present
     Assert.assertEquals(true, check.isApplicable(request, oneServiceList, false));
@@ -202,6 +211,52 @@
     Assert.assertEquals(false, check.isApplicable(request, nonExistedList, true));
   }
 
+  @Test
+  public void testIsApplicableWithVDF() throws Exception {
+    final String clusterName = "c1";
+    final Cluster cluster = EasyMock.createMock(Cluster.class);
+
+    Map<String, Service> services = new HashMap<String, Service>(){{
+      put("SERVICE1", null);
+      put("SERVICE2", null);
+      put("SERVICE3", null);
+    }};
+
+    expect(clusters.getCluster(anyString())).andReturn(cluster).atLeastOnce();
+    expect(cluster.getServices()).andReturn(services).atLeastOnce();
+
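+    // the test's premise: a PATCH repository applies only to the services its
+    // version definition XML (VDF) advertises, here just SERVICE2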
+    RepositoryVersionEntity repoVersion = EasyMock.createMock(RepositoryVersionEntity.class);
+    VersionDefinitionXml repoXml = EasyMock.createMock(VersionDefinitionXml.class);
+    expect(repoVersion.getType()).andReturn(RepositoryType.PATCH).atLeastOnce();
+    expect(repoVersion.getRepositoryXml()).andReturn(repoXml).atLeastOnce();
+    expect(repoXml.getAvailableServiceNames()).andReturn(Collections.singleton("SERVICE2")).atLeastOnce();
+
+    expect(repositoryVersionDao.findByStackNameAndVersion(
+        anyString(), anyString())).andReturn(repoVersion).atLeastOnce();
+
+    replay(clusters, cluster, repositoryVersionDao, repoVersion, repoXml);
+
+    AbstractCheckDescriptor check = new TestCheckImpl(PrereqCheckType.SERVICE);
+    PrereqCheckRequest request = new PrereqCheckRequest(clusterName, UpgradeType.ROLLING);
+    request.setTargetStackId(new StackId("HDP-2.5"));
+
+    List<String> allServicesList = Arrays.asList("SERVICE1", "SERVICE2");
+
+    // SERVICE2 is the only service in the VDF; one match is enough for the check to apply
+    Assert.assertEquals(true, check.isApplicable(request, allServicesList, false));
+
+    List<String> oneServiceList = Arrays.asList("SERVICE1");
+
+    // SERVICE2 is the only service in the VDF, so a check for SERVICE1 alone fails
+    Assert.assertEquals(false, check.isApplicable(request, oneServiceList, false));
+
+    // a VDF without available services is technically invalid, so expect any passed services to return false
+    reset(repoXml);
+    expect(repoXml.getAvailableServiceNames()).andReturn(Collections.<String>emptySet()).atLeastOnce();
+    replay(repoXml);
+    Assert.assertEquals(false, check.isApplicable(request, allServicesList, false));
+  }
+
   /**
    * Tests {@link UpgradeCheck#required()}.
    *
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index 7d8ba50..868dea1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -187,7 +187,12 @@
     expect(mockJoinResultSet.getInt(1)).andReturn(resultCount);
     expect(mockStatement.executeQuery("select count(tpr.id) from topology_request tpr")).andReturn(mockCountResultSet);
     expect(mockStatement.executeQuery("select count(DISTINCT tpr.id) from topology_request tpr join " +
-      "topology_logical_request tlr on tpr.id = tlr.request_id join topology_host_request thr on tlr.id = thr.logical_request_id join topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task tlt on tht.id = tlt.host_task_id")).andReturn(mockJoinResultSet);
+            "topology_logical_request tlr on tpr.id = tlr.request_id")).andReturn(mockJoinResultSet);
+
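+    // the former five-table join is split into two consistency queries, so a
+    // second count pair is expected for the topology_host_request chain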
+    expect(mockStatement.executeQuery("select count(thr.id) from topology_host_request thr")).andReturn(mockCountResultSet);
+    expect(mockStatement.executeQuery("select count(DISTINCT thr.id) from topology_host_request thr join " +
+            "topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task " +
+            "tlt on tht.id = tlt.host_task_id")).andReturn(mockJoinResultSet);
 
     DatabaseConsistencyCheckHelper.setInjector(mockInjector);
     DatabaseConsistencyCheckHelper.setConnection(mockConnection);
@@ -533,4 +538,70 @@
   }
 
 
+  @Test
+  public void testCheckForLargeTables() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariMetaInfo mockAmbariMetainfo = easyMockSupport.createNiceMock(AmbariMetaInfo.class);
+    final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
+    final Connection mockConnection = easyMockSupport.createNiceMock(Connection.class);
+    final Statement mockStatement = easyMockSupport.createNiceMock(Statement.class);
+    final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
+    final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
+    final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
+    final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
+
+    final ResultSet hostRoleCommandResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+    final ResultSet executionCommandResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+    final ResultSet stageResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+    final ResultSet requestResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+    final ResultSet alertHistoryResultSet = easyMockSupport.createNiceMock(ResultSet.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariMetaInfo.class).toInstance(mockAmbariMetainfo);
+        bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
+        bind(EntityManager.class).toInstance(mockEntityManager);
+        bind(DBAccessor.class).toInstance(mockDBDbAccessor);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(OsFamily.class).toInstance(mockOSFamily);
+      }
+    });
+
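+    // each ResultSet stub reports an arbitrary non-zero size for one of the
+    // tables that the large-table check inspects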
+    expect(hostRoleCommandResultSet.next()).andReturn(true).once();
+    expect(executionCommandResultSet.next()).andReturn(true).once();
+    expect(stageResultSet.next()).andReturn(true).once();
+    expect(requestResultSet.next()).andReturn(true).once();
+    expect(alertHistoryResultSet.next()).andReturn(true).once();
+    expect(hostRoleCommandResultSet.getLong(1)).andReturn(2345L).atLeastOnce();
+    expect(executionCommandResultSet.getLong(1)).andReturn(12345L).atLeastOnce();
+    expect(stageResultSet.getLong(1)).andReturn(2321L).atLeastOnce();
+    expect(requestResultSet.getLong(1)).andReturn(1111L).atLeastOnce();
+    expect(alertHistoryResultSet.getLong(1)).andReturn(2223L).atLeastOnce();
+    expect(mockDBDbAccessor.getConnection()).andReturn(mockConnection);
+    expect(mockDBDbAccessor.getDbType()).andReturn(DBAccessor.DbType.MYSQL);
+    expect(mockDBDbAccessor.getDbSchema()).andReturn("test_schema");
+    expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement).anyTimes();
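+    // on MySQL the check is expected to size each table via
+    // information_schema.TABLES, summing data_length and index_length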
+    expect(mockStatement.executeQuery("SELECT (data_length + index_length) \"Table Size\" " +
+            "FROM information_schema.TABLES WHERE table_schema = \"test_schema\" AND table_name =\"host_role_command\"")).andReturn(hostRoleCommandResultSet);
+    expect(mockStatement.executeQuery("SELECT (data_length + index_length) \"Table Size\" " +
+            "FROM information_schema.TABLES WHERE table_schema = \"test_schema\" AND table_name =\"execution_command\"")).andReturn(executionCommandResultSet);
+    expect(mockStatement.executeQuery("SELECT (data_length + index_length) \"Table Size\" " +
+            "FROM information_schema.TABLES WHERE table_schema = \"test_schema\" AND table_name =\"stage\"")).andReturn(stageResultSet);
+    expect(mockStatement.executeQuery("SELECT (data_length + index_length) \"Table Size\" " +
+            "FROM information_schema.TABLES WHERE table_schema = \"test_schema\" AND table_name =\"request\"")).andReturn(requestResultSet);
+    expect(mockStatement.executeQuery("SELECT (data_length + index_length) \"Table Size\" " +
+            "FROM information_schema.TABLES WHERE table_schema = \"test_schema\" AND table_name =\"alert_history\"")).andReturn(alertHistoryResultSet);
+
+    DatabaseConsistencyCheckHelper.setInjector(mockInjector);
+
+    easyMockSupport.replayAll();
+
+    mockAmbariMetainfo.init();
+
+    DatabaseConsistencyCheckHelper.resetCheckResult();
+    DatabaseConsistencyCheckHelper.checkForLargeTables();
+
+    easyMockSupport.verifyAll();
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
index 61953a7..c899af6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java
@@ -22,12 +22,14 @@
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
@@ -44,6 +46,7 @@
  */
 public class MapReduce2JobHistoryStatePreservingCheckTest {
   private final Clusters m_clusters = Mockito.mock(Clusters.class);
+  private final RepositoryVersionDAO m_repositoryVersionDao = Mockito.mock(RepositoryVersionDAO.class);
 
   private final MapReduce2JobHistoryStatePreservingCheck m_check = new MapReduce2JobHistoryStatePreservingCheck();
 
@@ -59,8 +62,20 @@
         return m_clusters;
       }
     };
+
+    m_check.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
+      @Override
+      public RepositoryVersionDAO get() {
+        return m_repositoryVersionDao;
+      }
+    };
+
     Configuration config = Mockito.mock(Configuration.class);
     m_check.config = config;
+
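+    // a STANDARD repository stub keeps the new repository-type lookup from
+    // filtering services, so the existing applicability tests behave as before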
+    RepositoryVersionEntity rve = Mockito.mock(RepositoryVersionEntity.class);
+    Mockito.when(rve.getType()).thenReturn(RepositoryType.STANDARD);
+    Mockito.when(m_repositoryVersionDao.findByStackNameAndVersion(Mockito.anyString(), Mockito.anyString())).thenReturn(rve);
   }
 
   /**
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
index 996f349..253c835 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
@@ -45,7 +45,6 @@
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.PrerequisiteCheck;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
index 7d1f907..ca71e3f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
@@ -27,9 +27,12 @@
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
@@ -51,6 +54,7 @@
   private Clusters m_clusters = EasyMock.createMock(Clusters.class);
   private ServicesNamenodeTruncateCheck m_check = new ServicesNamenodeTruncateCheck();
   private final Map<String, String> m_configMap = new HashMap<>();
+  private RepositoryVersionDAO m_repositoryVersionDAO = EasyMock.createMock(RepositoryVersionDAO.class);
 
   @Before
   public void setup() throws Exception {
@@ -80,6 +84,19 @@
         return m_clusters;
       }
     };
+
+    m_check.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
+      @Override
+      public RepositoryVersionDAO get() {
+        return m_repositoryVersionDAO;
+      }
+    };
+
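+    // a STANDARD repository stub so the new applicability lookup does not
+    // affect the existing truncate-check tests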
+    RepositoryVersionEntity rve = EasyMock.createMock(RepositoryVersionEntity.class);
+    expect(rve.getType()).andReturn(RepositoryType.STANDARD).anyTimes();
+    expect(m_repositoryVersionDAO.findByStackNameAndVersion(EasyMock.anyString(), EasyMock.anyString())).andReturn(rve).anyTimes();
+    replay(m_repositoryVersionDAO, rve);
   }
 
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 625ac8a..f35122a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2359,8 +2359,10 @@
     expect(cluster.getDesiredStackVersion()).andReturn(new StackId("HDP-2.1")).atLeastOnce();
 
     // this getting called one time means the cluster version is getting created
-    cluster.createClusterVersion(anyObject(StackId.class), anyObject(String.class), anyObject(String.class), anyObject(RepositoryVersionState.class));
-    expectLastCall().once();
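+    // createClusterVersion() now returns the created entity, so the mock
+    // returns a ClusterVersionEntity instead of using a void expectation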
+    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
+    expect(cluster.createClusterVersion(anyObject(StackId.class), anyObject(String.class),
+        anyObject(String.class), anyObject(RepositoryVersionState.class))).andReturn(
+            clusterVersionEntity).once();
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7e4c4c2..83ba0bb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -158,6 +158,7 @@
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -174,8 +175,6 @@
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 
-import junit.framework.Assert;
-
 public class AmbariManagementControllerTest {
 
   private static final Logger LOG =
@@ -8253,7 +8252,7 @@
 
     List<Stage> stages = new ArrayList<>();
     stages.add(stageFactory.createNew(requestId1, "/a1", cluster1, clusterId, context,
-        CLUSTER_HOST_INFO, "", ""));
+        "", ""));
     stages.get(0).setStageId(1);
     stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_MASTER,
             RoleCommand.START,
@@ -8262,7 +8261,7 @@
             cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId1, "/a2", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(1).setStageId(2);
     stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
@@ -8270,19 +8269,19 @@
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId1, "/a3", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(2).setStageId(3);
     stages.get(2).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
             new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
-    Request request = new Request(stages, clusters);
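+    // cluster host info now travels with the Request rather than each Stage,
+    // hence the extra empty-string argument passed to every Request below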
+    Request request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     stages.clear();
     stages.add(stageFactory.createNew(requestId2, "/a4", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(0).setStageId(4);
     stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
@@ -8290,14 +8289,14 @@
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
     stages.add(stageFactory.createNew(requestId2, "/a5", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(1).setStageId(5);
     stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
             RoleCommand.START,
             new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
                     hostName1, System.currentTimeMillis()), cluster1, "HBASE", false, false);
 
-    request = new Request(stages, clusters);
+    request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     // Add a stage to execute a task as server-side action on the Ambari server
@@ -8305,12 +8304,12 @@
         new ServiceComponentHostServerActionEvent(Role.AMBARI_SERVER_ACTION.toString(), null, System.currentTimeMillis());
     stages.clear();
     stages.add(stageFactory.createNew(requestId3, "/a6", cluster1, clusterId, context,
-      CLUSTER_HOST_INFO, "", ""));
+      "", ""));
     stages.get(0).setStageId(6);
     stages.get(0).addServerActionCommand("some.action.class.name", null, Role.AMBARI_SERVER_ACTION,
         RoleCommand.EXECUTE, cluster1, serviceComponentHostServerActionEvent, null, null, null, null, false, false);
     assertEquals("_internal_ambari", stages.get(0).getOrderedHostRoleCommands().get(0).getHostName());
-    request = new Request(stages, clusters);
+    request = new Request(stages, "", clusters);
     actionDB.persistActions(request);
 
     org.apache.ambari.server.controller.spi.Request spiRequest = PropertyHelper.getReadRequest(
@@ -8998,6 +8997,8 @@
 
     Assert.assertNull(topologyHostInfoDAO.findByHostname(host1));
 
+    Long firstHostId = clusters.getHost(host1).getHostId();
+
     // Deletion without specifying cluster should be successful
     requests.clear();
     requests.add(new HostRequest(host1, null));
@@ -9011,6 +9012,10 @@
     Assert.assertFalse(clusters.getClustersForHost(host1).contains(cluster));
     Assert.assertNull(topologyHostInfoDAO.findByHostname(host1));
 
+    // verify there are no host role commands for the host
+    List<HostRoleCommandEntity> tasks = hostRoleCommandDAO.findByHostId(firstHostId);
+    assertEquals(0, tasks.size());
+
     // Case 3: Delete host that is still part of the cluster, and specify the cluster_name in the request
     requests.clear();
     requests.add(new HostRequest(host2, cluster1));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 5275580..e654c72 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -1103,6 +1103,8 @@
     // Create Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
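+    // stage-creation steps now record the cluster host info on the request
+    // container, so each step pairs setClusterHostInfo() with addStages()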
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1110,28 +1112,38 @@
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     }
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // TODO: Add more of these when more stages are added.
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1294,36 +1306,50 @@
     // Create Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
 
     if (identitiesManaged) {
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
-      requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
+      requestStageContainer.addStages(anyObject(List.class));
       expectLastCall().once();
     }
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // TODO: Add more of these when more stages are added.
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
-    requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
+    requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
 
     replayAll();
@@ -1479,42 +1505,58 @@
     // Hook Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // StopZk Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(anyObject(List.class));
     expectLastCall().once();
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Update Configs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Destroy Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(2L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
     // Cleanup Stage
     expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -1687,31 +1729,43 @@
       // Create Preparation Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(0L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Update Configurations Stage
       expect(requestStageContainer.getLastStageId()).andReturn(1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Clean-up/Finalize Stage
       expect(requestStageContainer.getLastStageId()).andReturn(3L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     } else {
@@ -2809,7 +2863,7 @@
   private void setupStageFactory() {
     final StageFactory stageFactory = injector.getInstance(StageFactory.class);
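+    // createNew() lost its per-stage cluster host info parameter, so the
+    // expectation below matches one fewer String argument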
     expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
-        anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyLong(), anyObject(String.class), anyObject(String.class),
         anyObject(String.class)))
         .andAnswer(new IAnswer<Stage>() {
           @Override
@@ -3061,26 +3115,36 @@
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Create Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Create Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Distribute Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -3253,21 +3317,29 @@
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
@@ -3420,26 +3492,37 @@
       // Preparation Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Principals Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Create Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Distribute Keytabs Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
       // Clean-up/Finalize Stage
       expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
       expect(requestStageContainer.getId()).andReturn(1L).once();
+      requestStageContainer.setClusterHostInfo(anyString());
+      expectLastCall().once();
       requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
       expectLastCall().once();
     }
@@ -3582,21 +3665,29 @@
     // Preparation Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Principals Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Delete Keytabs Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
     // Clean-up/Finalize Stage
     expect(requestStageContainer.getLastStageId()).andReturn(-1L).anyTimes();
     expect(requestStageContainer.getId()).andReturn(1L).once();
+    requestStageContainer.setClusterHostInfo(anyString());
+    expectLastCall().once();
     requestStageContainer.addStages(EasyMock.<List<Stage>>anyObject());
     expectLastCall().once();
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java
deleted file mode 100644
index 7583e66..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- * Tests for GSInstallerClusterProvider
- */
-public class GSInstallerClusterProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(1, resources.size());
-    Assert.assertEquals("ambari", resources.iterator().next().getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    Predicate predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("ambari").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-    Resource next = resources.iterator().next();
-    Assert.assertEquals("ambari",    next.getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
-    Assert.assertEquals("HDP-1.2.0", next.getPropertyValue(GSInstallerClusterProvider.CLUSTER_VERSION_PROPERTY_ID));
-
-    predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("non-existent Cluster").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>(), null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(new RequestImpl(null, null, null, null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
deleted file mode 100644
index 2e26ec9..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- * Tests for GSInstallerComponentProvider.
- */
-public class GSInstallerComponentProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(24, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").or().
-        property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("GANGLIA_MONITOR").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("BadComponent").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>(), null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(new RequestImpl(null, null, null, null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
deleted file mode 100644
index 2552fa4..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- *
- */
-public class GSInstallerHostComponentProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(32, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(5, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID).equals("UnknownHost").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HBASE_REGIONSERVER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HBASE_REGIONSERVER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("HostRoles"), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>(), null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(new RequestImpl(null, null, null, null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java
deleted file mode 100644
index beacc72..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- *
- */
-public class GSInstallerHostProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(5, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").or().
-        property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-8-113-183.ec2.internal").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("unknownHost").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("HEALTHY", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("Hosts"), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("HEALTHY", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>(), null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(new RequestImpl(null, null, null, null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
-
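
The deleted host tests exercised Ambari's fluent PredicateBuilder; a minimal sketch of the composition they relied on, where property().equals() matches one value and or() chains alternatives:

    Predicate predicate = new PredicateBuilder()
        .property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal")
        .or()
        .property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-8-113-183.ec2.internal")
        .toPredicate();
    // getResources returns only the resources matching the predicate
    Set<Resource> matched = provider.getResources(PropertyHelper.getReadRequest(), predicate);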
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProviderTest.java
deleted file mode 100644
index 9d1f053..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProviderTest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.Collections;
-
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- * GSInstallerNoOpProvider tests.
- */
-public class GSInstallerNoOpProviderTest {
-
-  @Test
-  public void testGetKeyPropertyIds() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerNoOpProvider provider = new GSInstallerNoOpProvider(Resource.Type.Workflow, clusterDefinition);
-    Assert.assertNotNull(provider.getKeyPropertyIds());
-  }
-
-  @Test
-  public void testCheckPropertyIds() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerNoOpProvider provider = new GSInstallerNoOpProvider(Resource.Type.Workflow, clusterDefinition);
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("id")).isEmpty());
-  }
-}
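
The deleted no-op test leaned on the ResourceProvider contract that checkPropertyIds returns the subset of requested ids the provider does not recognize, so an empty result means everything was accepted, which is exactly what a no-op provider should do. The assertion it encoded, in miniature:

    // nothing comes back unsupported from a provider that accepts any id
    Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("id"));
    Assert.assertTrue(unsupported.isEmpty());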
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
deleted file mode 100644
index 5ea00a8..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import junit.framework.Assert;
-
-/**
- *
- */
-public class GSInstallerServiceProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(11, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(
-        new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(
-        clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(
-        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
-        "MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(
-        PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(
-        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
-        "HDFS").or().property(
-        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
-        "GANGLIA").toPredicate();
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(),
-        predicate);
-
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(
-        GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(
-        "NO SERVICE").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(),
-        predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("ServiceInfo"), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>(), null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(new RequestImpl(null, null, null, null), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
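
Every check-state test in these deleted files follows the same timing recipe: build ClusterDefinition with what is evidently a 500 ms state-refresh window, flip the stub to unhealthy, then sleep just past the window so the cached HEALTHY/STARTED reading expires before the next query. Condensed, using the same types:

    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500); // 500 ms refresh window
    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);

    stateProvider.setHealthy(false);
    Thread.sleep(501); // wait out the cached state value
    // the next getResources call reports "INIT" instead of "STARTED"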
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java
deleted file mode 100644
index a8e7a6d..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-/**
- * Test gsInstaller state provider.
- */
-public class TestGSInstallerStateProvider implements GSInstallerStateProvider {
-
-  private boolean healthy = true;
-
-  public void setHealthy(boolean healthy) {
-    this.healthy = healthy;
-  }
-
-  @Override
-  public boolean isHealthy(String hostName, String componentName) {
-    return healthy;
-  }
-}
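
TestGSInstallerStateProvider is the shared stub behind all of the deleted tests: a single mutable flag answers isHealthy for every host/component pair, which is what lets one setHealthy(false) call flip an entire simulated cluster out of its healthy state:

    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
    stateProvider.setHealthy(false);
    // every host/component now reports unhealthy
    assert !stateProvider.isHealthy("anyHost", "ANY_COMPONENT");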
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractJDBCResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractJDBCResourceProviderTest.java
index f546aab..07540bf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractJDBCResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractJDBCResourceProviderTest.java
@@ -88,7 +88,7 @@
     verify(rs);
   }
 
-  private static enum TestFields {
+  private enum TestFields {
     field1, field2
   }
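
A note on the modifier change above: member enums are implicitly static in Java, so the explicit static on a nested enum is redundant and the two declarations are equivalent:

    private enum TestFields { // implicitly static as a member type
      field1, field2
    }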
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index dba4043..24fc3c7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -18,16 +18,16 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
 import java.util.Arrays;
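
The static-import swap above retires junit.framework.Assert, the legacy JUnit 3 assertion class that JUnit 4 keeps only for backward compatibility, in favor of org.junit.Assert, where assertEquals/assertTrue/fail live in JUnit 4:

    // before: JUnit 3-era class, deprecated under JUnit 4
    import static junit.framework.Assert.assertEquals;
    // after: the supported JUnit 4 equivalent
    import static org.junit.Assert.assertEquals;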
@@ -40,11 +40,17 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.AmbariContext;
 import org.apache.ambari.server.topology.Blueprint;
@@ -58,8 +64,10 @@
 import org.apache.ambari.server.topology.HostGroupImpl;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyRequest;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
 import org.easymock.MockType;
 import org.junit.After;
@@ -67,6 +75,10 @@
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -77,7 +89,9 @@
 /**
  * BlueprintConfigurationProcessor unit tests.
  */
-public class BlueprintConfigurationProcessorTest {
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(AmbariServer.class)
+public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
   private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
   private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
@@ -97,6 +111,24 @@
   @Mock(type = MockType.NICE)
   private Stack stack;
 
+  @Mock(type = MockType.NICE)
+  private AmbariManagementController controller;
+
+  @Mock(type = MockType.NICE)
+  private KerberosHelper kerberosHelper;
+
+  @Mock(type = MockType.NICE)
+  private KerberosDescriptor kerberosDescriptor;
+
+  @Mock(type = MockType.NICE)
+  private Clusters clusters;
+
+  @Mock(type = MockType.NICE)
+  private Cluster cluster;
+
+  @Mock
+  private TopologyRequest topologyRequestMock;
+
   @Before
   public void init() throws Exception {
     expect(bp.getStack()).andReturn(stack).anyTimes();
@@ -109,7 +141,7 @@
     expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
 
     expect(serviceInfo.getRequiredProperties()).andReturn(
-        Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
+      Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
     expect(serviceInfo.getRequiredServices()).andReturn(Collections.<String>emptyList()).anyTimes();
 
     Collection<String> hdfsComponents = new HashSet<>();
@@ -193,6 +225,18 @@
 
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+    expect(ambariContext.isClusterKerberosEnabled(1)).andReturn(true).once();
+    expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes();
+    PowerMock.mockStatic(AmbariServer.class);
+    expect(AmbariServer.getController()).andReturn(controller).anyTimes();
+    PowerMock.replay(AmbariServer.class);
+    expect(clusters.getCluster("clusterName")).andReturn(cluster).anyTimes();
+    expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
+    Set<String> properties = new HashSet<String>();
+    properties.add("core-site/hadoop.security.auth_to_local");
+    expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes();
   }
 
   @After
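
The added setup mocks a static seam: the code under test obtains its controller via the static AmbariServer.getController(), so no mock can be injected directly and the test intercepts the static call instead. A minimal sketch of the PowerMock/EasyMock pattern the new lines use:

    @RunWith(PowerMockRunner.class)
    @PrepareForTest(AmbariServer.class)      // lets PowerMock rewrite AmbariServer's statics
    public class StaticSeamExampleTest extends EasyMockSupport {
      @Test
      public void controllerIsServedFromTheMockedStatic() {
        AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
        PowerMock.mockStatic(AmbariServer.class);
        expect(AmbariServer.getController()).andReturn(controller).anyTimes();
        PowerMock.replay(AmbariServer.class);
        // anything that now calls AmbariServer.getController() receives the mock
      }
    }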
@@ -208,7 +252,7 @@
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -295,7 +339,7 @@
       ImmutableMap.of("admin-properties", rangerAdminProperties);
 
 
-    Configuration clusterConfig = new Configuration(properties, ImmutableMap.<String, Map<String,Map<String,String>>>of());
+    Configuration clusterConfig = new Configuration(properties, ImmutableMap.<String, Map<String, Map<String, String>>>of());
 
     Collection<String> hostGroup1Components = ImmutableSet.of("RANGER_ADMIN");
     TestHostGroup group1 = new TestHostGroup("group1", hostGroup1Components, Collections.singleton("testhost1"));
@@ -331,10 +375,10 @@
     parentProperties.put("yarn-site", parentYarnSiteProps);
 
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -367,7 +411,7 @@
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -385,14 +429,14 @@
     group2Properties.put("yarn-site", group2YarnSiteProps);
     // host group config -> BP config -> cluster scoped config
     Configuration group2BPConfiguration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     Configuration group2Configuration = new Configuration(group2Properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfiguration);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfiguration);
 
     // set config on hostgroup
     TestHostGroup group2 = new TestHostGroup("group2", hgComponents2,
-        Collections.singleton("testhost2"), group2Configuration);
+      Collections.singleton("testhost2"), group2Configuration);
 
     Collection<TestHostGroup> hostGroups = new HashSet<>();
     hostGroups.add(group1);
@@ -404,7 +448,7 @@
 
     assertEquals("%HOSTGROUP::group1%", properties.get("yarn-site").get("yarn.resourcemanager.hostname"));
     assertEquals("%HOSTGROUP::group1%",
-        group2Configuration.getPropertyValue("yarn-site", "yarn.resourcemanager.resource-tracker.address"));
+      group2Configuration.getPropertyValue("yarn-site", "yarn.resourcemanager.resource-tracker.address"));
   }
 
   @Test
@@ -415,7 +459,7 @@
     properties.put("core-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -447,7 +491,7 @@
     properties.put("yarn-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -479,7 +523,7 @@
     properties.put("hbase-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -526,7 +570,7 @@
     properties.put("webhcat-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -577,7 +621,7 @@
     properties.put("storm-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -637,7 +681,7 @@
     properties.put("hive-site", hiveSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -671,7 +715,7 @@
     properties.put("hive-site", typeProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents = new HashSet<>();
     hgComponents.add("NAMENODE");
@@ -756,7 +800,7 @@
     assertFalse("Password property should have been excluded",
       properties.get("ranger-yarn-plugin-properties").containsKey("REPOSITORY_CONFIG_PASSWORD"));
     assertFalse("Password property should have been excluded",
-                properties.get("ranger-yarn-plugin-properties").containsKey("SSL_KEYSTORE_PASSWORD"));
+      properties.get("ranger-yarn-plugin-properties").containsKey("SSL_KEYSTORE_PASSWORD"));
     assertFalse("Password property should have been excluded",
       properties.get("ranger-yarn-plugin-properties").containsKey("SSL_TRUSTSTORE_PASSWORD"));
     assertFalse("Password property should have been excluded",
@@ -771,10 +815,10 @@
 
     // verify that the following password properties matching the "*_SECRET" rule have been excluded
     assertFalse("Secret property should have been excluded",
-	      properties.get("secret-test-properties").containsKey("knox_master_secret"));
+      properties.get("secret-test-properties").containsKey("knox_master_secret"));
     // verify that the property that does not match the "*_SECRET" rule is still included
     assertTrue("Expected secret property not found",
-	      properties.get("secret-test-properties").containsKey("test.secret.should.be.included"));
+      properties.get("secret-test-properties").containsKey("test.secret.should.be.included"));
     // verify the custom properties map has been modified by the filters
     assertEquals("custom-test-properties type was not properly exported",
       2, properties.get("custom-test-properties").size());
@@ -808,7 +852,7 @@
     falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + expectedHostName + "@EXAMPLE.COM");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -828,13 +872,13 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Falcon Broker URL property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
 
     assertEquals("Falcon Kerberos Principal property not properly exported",
       "falcon/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
 
     assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
-        "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+      "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
   }
 
   @Test
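
Throughout these export tests the expected values are %HOSTGROUP::name% tokens, the placeholder an exported blueprint uses instead of a concrete hostname so it can be replayed on different hosts. The createExportedHostName/createExportedAddress helpers are defined outside this hunk; hypothetical reconstructions consistent with the literal "%HOSTGROUP::" + name + "%" expressions in the assertions:

    // hypothetical helper equivalents, inferred from the assertion literals above
    private static String createExportedHostName(String hostGroupName) {
      return "%HOSTGROUP::" + hostGroupName + "%";
    }

    private static String createExportedAddress(String portNumber, String hostGroupName) {
      return createExportedHostName(hostGroupName) + ":" + portNumber;
    }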
@@ -897,6 +941,7 @@
     kerberosEnvProperties.put("kdc_hosts", expectedHostName + ",secondary.kdc.org");
     kerberosEnvProperties.put("master_kdc", expectedHostName);
     coreSiteProperties.put("hadoop.proxyuser.yarn.hosts", expectedHostName);
+    coreSiteProperties.put("hadoop.security.auth_to_local", "RULE:clustername");
 
     Configuration clusterConfig = new Configuration(configProperties,
       Collections.<String, Map<String, Map<String, String>>>emptyMap());
@@ -925,9 +970,11 @@
     assertFalse("kdc_hosts should not be present in exported blueprint in kerberos-env",
       kerberosEnvProperties.containsKey("kdc_hosts"));
     assertFalse("master_kdc should not be present in exported blueprint in kerberos-env",
-        kerberosEnvProperties.containsKey("master_kdc"));
+      kerberosEnvProperties.containsKey("master_kdc"));
     assertEquals("hadoop.proxyuser.yarn.hosts was not exported correctly",
       createExportedHostName("host_group_1"), coreSiteProperties.get("hadoop.proxyuser.yarn.hosts"));
+    assertFalse("hadoop.security.auth_to_local should not be present in exported blueprint in core-site",
+      coreSiteProperties.containsKey("hadoop.security.auth_to_local"));
   }
 
   @Test
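
The new assertion pairs with the Kerberos mocks added to init(): the mocked descriptor lists core-site/hadoop.security.auth_to_local among its auth-to-local properties, and the export path strips such properties, presumably because their realm-specific mapping rules do not carry over to another cluster. The cause-and-effect in miniature, assuming the same mocks:

    // the descriptor flags this property as carrying auth-to-local rules...
    expect(kerberosDescriptor.getAllAuthToLocalProperties())
        .andReturn(Collections.singleton("core-site/hadoop.security.auth_to_local")).anyTimes();

    // ...so after doUpdateForBlueprintExport() it must be absent from core-site
    assertFalse(coreSiteProperties.containsKey("hadoop.security.auth_to_local"));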
@@ -962,7 +1009,7 @@
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -982,19 +1029,19 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
   }
 
   @Test
@@ -1035,7 +1082,7 @@
     hadoopEnvProperties.put("dfs_ha_initial_namenode_standby", expectedHostName);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1055,19 +1102,19 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
@@ -1095,7 +1142,7 @@
     accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedNameService + "/apps/accumulo/data");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1116,11 +1163,11 @@
 
     // verify that any properties that include nameservices are not removed from the exported blueprint's configuration
     assertEquals("Property containing an HA nameservice (fs.defaultFS), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
     assertEquals("Property containing an HA nameservice (hbase.rootdir), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + expectedNameService + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
     assertEquals("Property containing an HA nameservice (instance.volumes), was not correctly exported by the processor",
-        "hdfs://" + expectedNameService + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
   }
 
   @Test
@@ -1131,10 +1178,10 @@
     configProperties.put("hdfs-site", hdfsSiteProperties);
 
     assertEquals("Incorrect initial state for hdfs-site config",
-        0, hdfsSiteProperties.size());
+      0, hdfsSiteProperties.size());
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1151,7 +1198,7 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Incorrect state for hdfs-site config after HA call in non-HA environment, should be zero",
-        0, hdfsSiteProperties.size());
+      0, hdfsSiteProperties.size());
   }
 
   @Test
@@ -1193,17 +1240,17 @@
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
     // are not validated
     Collection<String> groupComponents = new HashSet<>();
-    groupComponents.add("RESOURCEMANAGER");
+    groupComponents.add("NAMENODE");
     Collection<String> hosts = new ArrayList<>();
     hosts.add(expectedHostNameOne);
     hosts.add(expectedHostNameTwo);
-    hosts.add("serverTwo");
+//    hosts.add("serverTwo");
     TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
     Collection<TestHostGroup> hostGroups = new HashSet<>();
@@ -1215,36 +1262,36 @@
 
     // verify results for name service one
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
 
     // verify results for name service two
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
   }
 
   @Test
@@ -1258,7 +1305,7 @@
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -1271,7 +1318,7 @@
     yarnSiteProperties.put("yarn.log.server.web-service.url", expectedHostName + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution an the components
@@ -1297,21 +1344,21 @@
     assertEquals("Yarn ResourceManager tracker address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
     assertEquals("Yarn ResourceManager timeline web service url was incorrectly exported",
-            createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.log.server.web-service.url"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.log.server.web-service.url"));
   }
 
   @Test
@@ -1325,7 +1372,7 @@
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -1337,7 +1384,7 @@
     yarnSiteProperties.put("yarn.timeline-service.webapp.https.address", "0.0.0.0" + ":" + expectedPortNum);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1357,25 +1404,25 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
-        "http://" + "%HOSTGROUP::" + expectedHostGroupName + "%" +":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
+      "http://" + "%HOSTGROUP::" + expectedHostGroupName + "%" + ":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
     assertEquals("Yarn ResourceManager hostname was incorrectly exported",
-        createExportedHostName(expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.hostname"));
+      createExportedHostName(expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.hostname"));
     assertEquals("Yarn ResourceManager tracker address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly exported",
-        "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      "0.0.0.0" + ":" + expectedPortNum, yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
   }
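
Most of the hunks in this file are mechanical re-indents of the two-argument Configuration constructor call. As a hedged aside (the constructor signature is assumed from these call sites, not shown in the diff): under Java 8 target typing the explicit type witness on Collections.emptyMap() is unnecessary, so each of these calls could shrink to:

    // Sketch, assuming the Configuration(Map, Map) constructor seen at the call sites.
    Configuration clusterConfig = new Configuration(configProperties, Collections.emptyMap());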
 
   @Test
@@ -1412,7 +1459,7 @@
     accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedHostName + ":" + expectedPortNum + "/apps/accumulo/data");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1435,28 +1482,28 @@
     assertEquals("hdfs config property not exported properly",
       createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.http.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.https.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.secondary.http.address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.secondary.http.address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.secondary.http-address"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.secondary.http-address"));
     assertEquals("hdfs config property not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
 
     assertEquals("hdfs config in core-site not exported properly",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.default.name"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.default.name"));
     assertEquals("hdfs config in core-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName), coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hdfs config in hbase-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("hdfs config in accumulo-site not exported properly",
-        "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
   }
 
   @Test
@@ -1492,7 +1539,7 @@
     coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1522,31 +1569,31 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("hive property not properly exported",
-        "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
+      "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
     assertEquals("hive property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.hive.properties"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
 
     assertEquals("hive zookeeper quorum property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.zookeeper.quorum"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
 
     assertEquals("hive zookeeper connectString property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
 
   }
 
@@ -1590,7 +1637,7 @@
     coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("NAMENODE");
@@ -1621,18 +1668,18 @@
     assertEquals("hive property not properly exported",
       "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo), hiveSiteProperties.get("hive.metastore.uris"));
     assertEquals("hive property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.hive.properties"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
 
     assertEquals("hive property not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
 
     assertFalse("hive.server2.authentication.ldap.url should not have been present in the exported configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.ldap.url"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.ldap.url"));
     assertEquals("hive property not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
 
@@ -1640,12 +1687,12 @@
       createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
 
     assertEquals("hive zookeeper quorum property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.zookeeper.quorum"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
 
     assertEquals("hive zookeeper connectString property not properly exported",
-        createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
-        hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
   }
 
   @Test
@@ -1680,7 +1727,7 @@
     coreSiteProperties.put("hadoop.proxyuser.oozie.hosts", expectedHostName + "," + expectedHostNameTwo);
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // note: test hostgroups may not accurately reflect the required components for the config properties
     // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
@@ -1704,7 +1751,7 @@
     hostGroups.add(group2);
 
     if (BlueprintConfigurationProcessor.singleHostTopologyUpdaters != null &&
-            BlueprintConfigurationProcessor.singleHostTopologyUpdaters.containsKey("oozie-site")) {
+      BlueprintConfigurationProcessor.singleHostTopologyUpdaters.containsKey("oozie-site")) {
       BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").remove("oozie.service.JPAService.jdbc.url");
     }
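
The guard above removes an entry from BlueprintConfigurationProcessor.singleHostTopologyUpdaters, which is static and therefore shared across tests. A defensive variant would capture and restore the entry; a minimal sketch, with the updater's concrete value type left opaque since it is not visible in this hunk:

    // Sketch: capture the removed updater so it can be restored after the test.
    Object removed = BlueprintConfigurationProcessor.singleHostTopologyUpdaters
        .get("oozie-site").remove("oozie.service.JPAService.jdbc.url");
    try {
      // ... doUpdateForBlueprintExport() and the assertions below ...
    } finally {
      // put `removed` back (cast to the map's updater value type, not shown here)
    }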
 
@@ -1719,13 +1766,13 @@
     assertTrue(configProcessor.getRemovePropertyUpdaters().get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url"));
 
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.base.url"));
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.base.url"));
     assertEquals("oozie property not exported correctly",
       createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
+      createExportedHostName(expectedHostGroupName), oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
     assertEquals("oozie property not exported correctly",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
 
     // verify that the oozie properties that can refer to an external DB are not included in the export
     assertFalse("oozie_existing_mysql_host should not have been present in the exported configuration",
@@ -1856,7 +1903,7 @@
     accumuloSiteProperties.put("instance.zookeeper.host", createHostAddress(expectedHostName, expectedPortNumberOne) + "," + createHostAddress(expectedHostNameTwo, expectedPortNumberTwo));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     // test hostgroups may not accurately reflect the required components for the config properties which are mapped to them
     Collection<String> groupComponents = new HashSet<>();
@@ -1884,14 +1931,14 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        coreSiteProperties.get("ha.zookeeper.quorum"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("ha.zookeeper.quorum"));
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        hbaseSiteProperties.get("hbase.zookeeper.quorum"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      hbaseSiteProperties.get("hbase.zookeeper.quorum"));
     assertEquals("zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("templeton.zookeeper.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.zookeeper.hosts"));
     assertEquals("yarn-site zookeeper config not properly exported",
       createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
       yarnSiteProperties.get("hadoop.registry.zk.quorum"));
@@ -1899,11 +1946,11 @@
       createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
       sliderClientProperties.get("slider.zookeeper.quorum"));
     assertEquals("kafka zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        kafkaBrokerProperties.get("zookeeper.connect"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      kafkaBrokerProperties.get("zookeeper.connect"));
     assertEquals("accumulo-site zookeeper config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
-        accumuloSiteProperties.get("instance.zookeeper.host"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne) + "," + createExportedHostName(expectedHostGroupNameTwo, expectedPortNumberTwo),
+      accumuloSiteProperties.get("instance.zookeeper.host"));
   }
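
Every quorum-style assertion in this hunk follows the same shape: a comma-separated host(:port) list is exported entry by entry, and where a port is present it is preserved. Schematically, with illustrative values only:

    // before export: "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181"
    // after export:  "%HOSTGROUP::host_group_1%:2181,%HOSTGROUP::host_group_2%:2181"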
 
   @Test
@@ -1937,7 +1984,7 @@
 //    multiOozieSiteMap.put("hadoop.proxyuser.knox.hosts", new MultipleHostTopologyUpdater("KNOX_GATEWAY"));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("KNOX_GATEWAY");
@@ -1964,17 +2011,17 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Knox for core-site config not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        coreSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      coreSiteProperties.get("hadoop.proxyuser.knox.hosts"));
     assertEquals("Knox config for WebHCat not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        webHCatSiteProperties.get("webhcat.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("webhcat.proxyuser.knox.hosts"));
     assertEquals("Knox config for Oozie not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        oozieSiteProperties.get("hadoop.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      oozieSiteProperties.get("hadoop.proxyuser.knox.hosts"));
     assertEquals("Knox config for Oozie not properly exported",
-        createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
-        oozieSiteProperties.get("oozie.service.ProxyUserService.proxyuser.knox.hosts"));
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      oozieSiteProperties.get("oozie.service.ProxyUserService.proxyuser.knox.hosts"));
   }
 
   @Test
@@ -1989,7 +2036,7 @@
     kafkaBrokerProperties.put("kafka.ganglia.metrics.host", createHostAddress(expectedHostName, expectedPortNumberOne));
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("KAFKA_BROKER");
@@ -2013,8 +2060,8 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("kafka Ganglia config not properly exported",
-        createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
-        kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
+      createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
+      kafkaBrokerProperties.get("kafka.ganglia.metrics.host"));
   }
 
   @Test
@@ -2033,7 +2080,7 @@
     properties.put("worker.childopts", "some other info, undefined, more info");
 
     Configuration clusterConfig = new Configuration(configProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> groupComponents = new HashSet<>();
     groupComponents.add("ZOOKEEPER_SERVER");
@@ -2052,7 +2099,7 @@
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Property was incorrectly exported",
-        "%HOSTGROUP::" + expectedHostGroupName + "%", properties.get("storm.zookeeper.servers"));
+      "%HOSTGROUP::" + expectedHostGroupName + "%", properties.get("storm.zookeeper.servers"));
     assertEquals("Property with undefined host was incorrectly exported",
       "undefined", properties.get("nimbus.childopts"));
     assertEquals("Property with undefined host was incorrectly exported",
@@ -2123,10 +2170,10 @@
     parentProperties.put("yarn-site", parentYarnSiteProps);
 
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2160,7 +2207,7 @@
     properties.put("yarn-site", yarnSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2179,10 +2226,10 @@
     // group 2 host group configuration
     // HG config -> BP HG config -> cluster scoped config
     Configuration group2BPConfig = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     Configuration group2Config = new Configuration(group2Properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), group2BPConfig);
     // set config on HG
     TestHostGroup group2 = new TestHostGroup("group2", group2Components, Collections.singleton("testhost2"), group2Config);
 
@@ -2207,7 +2254,7 @@
     properties.put("yarn-site", yarnSiteProps);
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> group1Components = new HashSet<>();
     group1Components.add("NAMENODE");
@@ -2226,11 +2273,11 @@
     // group 2 host group configuration
     // HG config -> BP HG config -> cluster scoped config
     Configuration group2BPConfig = new Configuration(group2BPProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), clusterConfig);
 
     // can't set parent here because it is reset in cluster topology
     Configuration group2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     // set config on HG
     TestHostGroup group2 = new TestHostGroup("group2", group2Components, Collections.singleton("testhost2"), group2Config);
 
@@ -2811,19 +2858,19 @@
 
     // verify that the expected hostname was substituted for the host group name in the config
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported",
-        expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
     // verify that the Blueprint config processor has set the internal required properties
     // that determine the active and standby node hostnames for this HA setup
@@ -2831,22 +2878,22 @@
     String activeHost = hadoopEnvProperties.get("dfs_ha_initial_namenode_active");
     if (activeHost.equals(expectedHostName)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-          expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
     } else if (activeHost.equals(expectedHostNameTwo)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-          expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
     } else {
       fail("Active Namenode hostname was not set correctly: " + activeHost);
     }
 
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hbase.rootdir should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
+      "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
   }
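
The if/else-if chain above accepts either host as the active NameNode and then pins the standby to the other one. An equivalent order-insensitive formulation (a sketch using the same variables from this test) compares the pair as a set:

    // Sketch: active and standby together must cover exactly the two expected hosts.
    Set<String> expectedPair = new HashSet<>(Arrays.asList(expectedHostName, expectedHostNameTwo));
    Set<String> actualPair = new HashSet<>(Arrays.asList(
        hadoopEnvProperties.get("dfs_ha_initial_namenode_active"),
        hadoopEnvProperties.get("dfs_ha_initial_namenode_standby")));
    assertEquals("active/standby NameNode pair was not set correctly", expectedPair, actualPair);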
 
   @Test
@@ -2854,7 +2901,7 @@
     final String expectedHostGroupName = "host_group_1";
 
     final String expectedPropertyValue =
-        "hive.metastore.local=false,hive.metastore.uris=thrift://headnode0.ivantestcluster2-ssh.d1.internal.cloudapp.net:9083,hive.user.install.directory=/user";
+      "hive.metastore.local=false,hive.metastore.uris=thrift://headnode0.ivantestcluster2-ssh.d1.internal.cloudapp.net:9083,hive.user.install.directory=/user";
 
     Map<String, Map<String, String>> configProperties = new HashMap<>();
     Map<String, String> webHCatSiteProperties = new HashMap<>();
@@ -2931,8 +2978,8 @@
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
-        expectedMetaStoreURIs,
-        hiveSiteProperties.get("hive.metastore.uris"));
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
   }
 
   @Test
@@ -2941,10 +2988,10 @@
     final String expectedHostGroupNameTwo = "host_group_2";
 
     final String expectedHostNameOne =
-        "c6401.ambari.apache.org";
+      "c6401.ambari.apache.org";
 
     final String expectedHostNameTwo =
-        "c6402.ambari.apache.org";
+      "c6402.ambari.apache.org";
 
 
     // use exported HOSTGROUP syntax for this property, to make sure the
@@ -2995,8 +3042,8 @@
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
-        expectedMetaStoreURIs,
-        hiveSiteProperties.get("hive.metastore.uris"));
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
   }
 
   @Test
@@ -3020,12 +3067,12 @@
   }
 
   private void testHiveMetastoreHA(String separator) throws InvalidTopologyException, ConfigurationTopologyException {
-    final String[] parts = new String[] {
+    final String[] parts = new String[]{
       "hive.metastore.local=false",
       "hive.metastore.uris=" + getThriftURI("localhost"),
       "hive.metastore.sasl.enabled=false"
     };
-    final String[] hostNames = new String[] { "c6401.ambari.apache.org", "example.com", "c6402.ambari.apache.org" };
+    final String[] hostNames = new String[]{"c6401.ambari.apache.org", "example.com", "c6402.ambari.apache.org"};
     final Set<String> expectedUris = new HashSet<>();
     for (String hostName : hostNames) {
       expectedUris.add(getThriftURI(hostName));
@@ -3138,7 +3185,7 @@
 
     // simulate the Oozie HA configuration
     oozieSiteProperties.put("oozie.services.ext",
-        "org.apache.oozie.service.ZKLocksService,org.apache.oozie.service.ZKXLogStreamingService,org.apache.oozie.service.ZKJobsConcurrencyService,org.apache.oozie.service.ZKUUIDService");
+      "org.apache.oozie.service.ZKLocksService,org.apache.oozie.service.ZKXLogStreamingService,org.apache.oozie.service.ZKJobsConcurrencyService,org.apache.oozie.service.ZKUUIDService");
 
     oozieEnvProperties.put("oozie_existing_mysql_host", expectedExternalHost);
 
@@ -3164,13 +3211,13 @@
     updater.doUpdateForClusterCreate();
 
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.base.url"));
+      expectedHostName, oozieSiteProperties.get("oozie.base.url"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
+      expectedHostName, oozieSiteProperties.get("oozie.authentication.kerberos.principal"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName, oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
+      expectedHostName, oozieSiteProperties.get("oozie.service.HadoopAccessorService.kerberos.principal"));
     assertEquals("oozie property not updated correctly",
-        expectedHostName + "," + expectedHostNameTwo, coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
+      expectedHostName + "," + expectedHostNameTwo, coreSiteProperties.get("hadoop.proxyuser.oozie.hosts"));
   }
 
   @Test
@@ -3253,7 +3300,7 @@
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -3293,21 +3340,21 @@
     assertEquals("Yarn ResourceManager hostname was incorrectly exported",
       expectedHostName, yarnSiteProperties.get("yarn.resourcemanager.hostname"));
     assertEquals("Yarn ResourceManager tracker address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.resource-tracker.address"));
     assertEquals("Yarn ResourceManager webapp address was incorrectly updated",
       createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.webapp.address"));
     assertEquals("Yarn ResourceManager scheduler address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.scheduler.address"));
     assertEquals("Yarn ResourceManager address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.address"));
     assertEquals("Yarn ResourceManager admin address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.resourcemanager.admin.address"));
     assertEquals("Yarn ResourceManager timeline-service address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.address"));
     assertEquals("Yarn ResourceManager timeline webapp address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.address"));
     assertEquals("Yarn ResourceManager timeline webapp HTTPS address was incorrectly updated",
-        createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
+      createHostAddress(expectedHostName, expectedPortNum), yarnSiteProperties.get("yarn.timeline-service.webapp.https.address"));
   }
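
This cluster-update test mirrors the earlier export test over the same yarn-site properties; the two directions exercised throughout this file are, schematically:

    // doUpdateForBlueprintExport: c6401.ambari.apache.org:<port> -> %HOSTGROUP::host_group_1%:<port>
    // doUpdateForClusterCreate:   %HOSTGROUP::host_group_1%:<port> -> c6401.ambari.apache.org:<port>
    // so exporting and then creating should round-trip topology-bound values.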
 
   @Test
@@ -3323,7 +3370,7 @@
     configProperties.put("yarn-site", yarnSiteProperties);
 
     // setup properties that include host information
-    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName + ":19888/jobhistory/logs");
     yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
     yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
     yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
@@ -3419,7 +3466,7 @@
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostGroupNameTwo = "host_group_2";
     final String expectedQuorumJournalURL = "qjournal://" + createHostAddress(expectedHostNameOne, expectedPortNum) + ";" +
-        createHostAddress(expectedHostNameTwo, expectedPortNum) + "/mycluster";
+      createHostAddress(expectedHostNameTwo, expectedPortNum) + "/mycluster";
 
     Map<String, Map<String, String>> configProperties = new HashMap<>();
     Map<String, String> hdfsSiteProperties = new HashMap<>();
@@ -3449,8 +3496,8 @@
 
     // expect that all servers are included in configuration property without changes, and that the qjournal URL format is preserved
     assertEquals("HDFS HA shared edits directory property should not have been modified, since FQDNs were specified.",
-        expectedQuorumJournalURL,
-        hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+      expectedQuorumJournalURL,
+      hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
   }
 
   @Test
@@ -3600,7 +3647,7 @@
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("nimbus.seeds");
 
     assertEquals("nimbus.seeds property should not be updated when FQDNs are specified in configuration",
-                 expectedValue, updatedVal);
+      expectedValue, updatedVal);
   }
 
 
@@ -4091,8 +4138,6 @@
     }
 
 
-
-
   }
 
   @Test
@@ -4718,9 +4763,9 @@
     updater.doUpdateForClusterCreate();
 
     assertTrue("hive.server2.authentication.kerberos.keytab should have been included in configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
     assertTrue("hive.server2.authentication.kerberos.principal should have been included in configuration",
-        hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
   }
 
   @Test
@@ -4754,22 +4799,22 @@
       };
 
     Stack.ConfigProperty configProperty2 =
-        new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.master.classes", "") {
-          @Override
-          Set<PropertyDependencyInfo> getDependsOnProperties() {
-            PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
-            return Collections.singleton(dependencyInfo);
-          }
-        };
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.master.classes", "") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
 
     Stack.ConfigProperty configProperty3 =
-        new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.region.classes", "") {
-          @Override
-          Set<PropertyDependencyInfo> getDependsOnProperties() {
-            PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
-            return Collections.singleton(dependencyInfo);
-          }
-        };
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.region.classes", "") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
 
     mapOfMetadata.put("hbase.coprocessor.regionserver.classes", configProperty1);
     mapOfMetadata.put("hbase.coprocessor.master.classes", configProperty2);
@@ -4783,7 +4828,7 @@
     Set<String> emptySet = Collections.emptySet();
     expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
 
-      // customized stack calls for this test only
+    // customized stack calls for this test only
     expect(stack.getServiceForConfigType("hbase-site")).andReturn("HBASE").atLeastOnce();
     expect(stack.getConfigurationPropertiesWithMetadata("HBASE", "hbase-site")).andReturn(mapOfMetadata).atLeastOnce();
 
@@ -4874,7 +4919,7 @@
     updater.doUpdateForClusterCreate();
 
     assertTrue("hbase.coprocessor.regionserver.classes should have been included in configuration",
-        hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
+      hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
 
   }
 
@@ -4958,31 +5003,31 @@
     updater.doUpdateForClusterCreate();
 
     List<String> hostArray =
-        Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
+      Arrays.asList(atlasProperties.get("atlas.kafka.bootstrap.servers").split(","));
     List<String> expected =
-        Arrays.asList("c6401.ambari.apache.org:6667","c6402.ambari.apache.org:6667", "c6403.ambari.apache.org:6667");
+      Arrays.asList("c6401.ambari.apache.org:6667", "c6402.ambari.apache.org:6667", "c6403.ambari.apache.org:6667");
 
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.kafka.zookeeper.connect").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org:2181","c6402.ambari.apache.org:2181", "c6403.ambari.apache.org:2181");
+      Arrays.asList("c6401.ambari.apache.org:2181", "c6402.ambari.apache.org:2181", "c6403.ambari.apache.org:2181");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.index.search.solr.zookeeper-url").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr","c6402.ambari.apache.org:2181/ambari-solr", "c6403.ambari.apache.org:2181/ambari-solr");
+      Arrays.asList("c6401.ambari.apache.org:2181/ambari-solr", "c6402.ambari.apache.org:2181/ambari-solr", "c6403.ambari.apache.org:2181/ambari-solr");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.graph.storage.hostname").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
 
     hostArray = Arrays.asList(atlasProperties.get("atlas.audit.hbase.zookeeper.quorum").split(","));
     expected =
-        Arrays.asList("c6401.ambari.apache.org","c6402.ambari.apache.org", "c6403.ambari.apache.org");
+      Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org");
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
   }
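
The containsAll-in-both-directions pattern above asserts order-insensitive equality of two lists. Because the expected host lists here contain no duplicates, it is equivalent to comparing sets, which also yields a clearer failure message:

    // Sketch: equivalent order-insensitive check when entries are unique.
    Assert.assertEquals(new HashSet<>(expected), new HashSet<>(hostArray));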
 
@@ -5240,13 +5285,13 @@
 
 
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
-                 "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+      "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
 
     assertEquals("hbase.rootdir should not be modified by cluster update when NameNode HA is enabled.",
       "hdfs://" + expectedNameService + "/hbase/test/root/dir", hbaseSiteProperties.get("hbase.rootdir"));
 
     assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
-        "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+      "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
 
     // verify that the non-HA properties are filtered out in HA mode
     assertFalse("dfs.namenode.http-address should have been filtered out of this HA configuration",
@@ -5395,17 +5440,17 @@
     // all of these dynamic props will be set to the same host in this case where there is a single host group
     // with multiple hosts.  This may not be correct and a Jira is being filed to track this issue.
     String expectedPropertyValue = hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne);
-    if (! expectedPropertyValue.equals(expectedHostName + ":" + expectedPortNum) &&
-        ! expectedPropertyValue.equals(expectedHostNameTwo + ":" + expectedPortNum)) {
+    if (!expectedPropertyValue.equals(expectedHostName + ":" + expectedPortNum) &&
+      !expectedPropertyValue.equals(expectedHostNameTwo + ":" + expectedPortNum)) {
       fail("HTTPS address HA property not properly exported");
     }
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+      hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+      hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
-        hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+      hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
 
     assertEquals("HTTPS address HA property not properly exported", expectedPropertyValue,
       hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
@@ -5617,9 +5662,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     TestHostGroup group1 = new TestHostGroup("master_1", ImmutableSet.of("DATANODE", "NAMENODE"), Collections.singleton("node_1"));
     TestHostGroup group2 = new TestHostGroup("master_2", ImmutableSet.of("DATANODE", "NAMENODE"), Collections.singleton("node_2"));
@@ -5651,7 +5696,7 @@
 
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents1 = new HashSet<>();
     hgComponents1.add("HIVE_SERVER");
@@ -5683,7 +5728,7 @@
     coreSiteMap.put("fs.defaultFS", "localhost");
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
 
@@ -5716,7 +5761,7 @@
     coreSiteMap.put("fs.defaultFS", "localhost");
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
 
@@ -5766,9 +5811,10 @@
     falconEnvProperties.put("falcon_user", "test-falcon-user");
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents1 = new HashSet<>();
+    hgComponents1.add("DATANODE");
     hgComponents1.add("OOZIE_SERVER");
     hgComponents1.add("HIVE_SERVER");
     hgComponents1.add("HBASE_MASTER");
@@ -5812,9 +5858,10 @@
     falconEnvProperties.put("falcon_user", "test-falcon-user");
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents1 = new HashSet<>();
+    hgComponents1.add("DATANODE");
     hgComponents1.add("OOZIE_SERVER");
     hgComponents1.add("FALCON_SERVER");
     TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
@@ -5855,9 +5902,10 @@
     falconEnvProperties.put("falcon_user", "test-falcon-user");
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Collection<String> hgComponents1 = new HashSet<>();
+    hgComponents1.add("DATANODE");
     hgComponents1.add("OOZIE_SERVER");
     hgComponents1.add("FALCON_SERVER");
     TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
@@ -5894,12 +5942,13 @@
     coreSiteProperties.put("hadoop.proxyuser.test-oozie-user.hosts", "testOozieHostsVal");
 
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents1 = new HashSet<>();
+    hgComponents1.add("DATANODE");
     hgComponents1.add("OOZIE_SERVER");
     hgComponents1.add("FALCON_SERVER");
     TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
@@ -5939,9 +5988,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
 
     Collection<String> hgComponents1 = new HashSet<>();
@@ -5958,7 +6007,7 @@
     assertEquals(null, clusterConfig.getPropertyValue("hive-site", "atlas.cluster.name"));
     assertEquals(null, clusterConfig.getPropertyValue("hive-site", "atlas.rest.address"));
   }
-  
+
   @Test
   public void testAtlasHiveProperties() throws Exception {
     Map<String, Map<String, String>> properties = getAtlasHivePropertiesForTestCase();
@@ -5967,6 +6016,7 @@
 
   /**
    * If the Hive Exec Hooks property doesn't contain the Atlas Hook, then add it.
+   *
    * @throws Exception
    */
   @Test
@@ -5995,6 +6045,7 @@
 
   /**
    * Generate sample collection of properties for some of the test cases.
+   *
    * @return Map of sample properties
    */
   private Map<String, Map<String, String>> getAtlasHivePropertiesForTestCase() {
@@ -6021,15 +6072,16 @@
 
   /**
    * For several test cases, validate that org.apache.atlas.hive.hook.HiveHook has the correct value.
+   *
    * @param properties Map of properties to validate
    * @throws Exception
    */
   private void validateAtlasHivePropertiesForTestCase(Map<String, Map<String, String>> properties) throws Exception {
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents1 = new HashSet<>();
     hgComponents1.add("ATLAS_SERVER");
@@ -6074,9 +6126,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
 
     Collection<String> hgComponents1 = new HashSet<>();
@@ -6107,9 +6159,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents1 = new HashSet<>();
     hgComponents1.add("METRICS_COLLECTOR");
@@ -6138,9 +6190,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-        Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents1 = new HashSet<>();
     hgComponents1.add("METRICS_COLLECTOR");
@@ -6391,7 +6443,7 @@
     // WHEN
     Set<String> configTypes = configProcessor.doUpdateForClusterCreate();
     // THEN
-    assertEquals(expectedHostName + ":" + expectedPortNum, clusterConfig.getPropertyValue("core-site","fs.default.name"));
+    assertEquals(expectedHostName + ":" + expectedPortNum, clusterConfig.getPropertyValue("core-site", "fs.default.name"));
     assertEquals("stackDefaultUpgraded", clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key1"));
     // verify that fs.stackDefault.key2 is removed
     assertNull(clusterConfig.getPropertyValue("core-site", "fs.stackDefault.key2"));
@@ -6466,7 +6518,6 @@
     rangerAdminProperties.put("policymgr_external_url", "http://%HOSTGROUP::group1%:100");
 
 
-
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
       Collections.<String, Map<String, Map<String, String>>>emptyMap());
@@ -6503,7 +6554,6 @@
     rangerAdminProperties.put("policymgr_external_url", "http://localhost:100");
 
 
-
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
       Collections.<String, Map<String, Map<String, String>>>emptyMap());
@@ -6540,7 +6590,6 @@
     rangerAdminProperties.put("policymgr_external_url", "http://my.ranger.loadbalancer.com");
 
 
-
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
       Collections.<String, Map<String, Map<String, String>>>emptyMap());
@@ -6582,7 +6631,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://localhost:100");
 
@@ -6590,7 +6639,6 @@
     }
 
 
-
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties, new HashMap<String, Map<String, Map<String, String>>>());
     Configuration clusterConfig = new Configuration(clusterConfigProperties, new HashMap<String, Map<String, Map<String, String>>>(), parentClusterConfig);
@@ -6648,7 +6696,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://localhost:100");
 
@@ -6708,7 +6756,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://%HOSTGROUP::group2%:100");
 
@@ -6774,7 +6822,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://my_name_service:100");
 
@@ -6848,7 +6896,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://nn_host:100");
 
@@ -6913,7 +6961,7 @@
     );
     Map<String, Map<String, String>> clusterConfigProperties = new HashMap<>();
 
-    for (String configType: configTypesWithRangerHdfsAuditDir) {
+    for (String configType : configTypesWithRangerHdfsAuditDir) {
       Map<String, String> configProperties = new HashMap<>();
       configProperties.put("xasecure.audit.destination.hdfs.dir", "hdfs://my_name_service:100");
 
@@ -7094,10 +7142,10 @@
     String updatedVal = clusterConfig.getPropertyValue(configType, "dfs.encryption.key.provider.uri");
     Assert.assertTrue(updatedVal.startsWith("kms://http@"));
     Assert.assertTrue(updatedVal.endsWith(":9292/kms"));
-    String hostsString = updatedVal.substring(11,updatedVal.length()-9);
+    String hostsString = updatedVal.substring(11, updatedVal.length() - 9);
 
     List<String> hostArray = Arrays.asList(hostsString.split(";"));
-    List<String> expected = Arrays.asList("host1","host2");
+    List<String> expected = Arrays.asList("host1", "host2");
 
     // Then
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
@@ -7239,10 +7287,10 @@
     String updatedVal = clusterConfig.getPropertyValue(configType, "dfs.encryption.key.provider.uri");
     Assert.assertTrue(updatedVal.startsWith("kms://http@"));
     Assert.assertTrue(updatedVal.endsWith(":9292/kms"));
-    String hostsString = updatedVal.substring(11,updatedVal.length()-9);
+    String hostsString = updatedVal.substring(11, updatedVal.length() - 9);
 
     List<String> hostArray = Arrays.asList(hostsString.split(";"));
-    List<String> expected = Arrays.asList("host1","host2");
+    List<String> expected = Arrays.asList("host1", "host2");
 
     // Then
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
@@ -7293,10 +7341,10 @@
     String updatedVal = clusterConfig.getPropertyValue(configType, "dfs.encryption.key.provider.uri");
     Assert.assertTrue(updatedVal.startsWith("kms://http@"));
     Assert.assertTrue(updatedVal.endsWith(":9292/kms"));
-    String hostsString = updatedVal.substring(11,updatedVal.length()-9);
+    String hostsString = updatedVal.substring(11, updatedVal.length() - 9);
 
     List<String> hostArray = Arrays.asList(hostsString.split(";"));
-    List<String> expected = Arrays.asList("host1","host2");
+    List<String> expected = Arrays.asList("host1", "host2");
 
     // Then
     Assert.assertTrue(hostArray.containsAll(expected) && expected.containsAll(hostArray));
@@ -7320,9 +7368,9 @@
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-                                                          Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-                                                    Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
 
     Collection<String> stormComponents = new HashSet<>();
@@ -7432,7 +7480,6 @@
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
 
-
     // When
     configProcessor.doUpdateForClusterCreate();
 
@@ -7487,7 +7534,7 @@
   @Test
   public void testYamlMultiValueWithSingleQuoteFlowStyleFormatSingleValue() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
     String originalValue = "test_value";
 
 
@@ -7495,14 +7542,14 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "['test_value']";
+    String expectedValue = "['test_value']";
     assertEquals(expectedValue, newValue);
   }
 
   @Test
   public void testYamlMultiValueWithPlainFlowStyleFormatSingleValue() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
     String originalValue = "test_value";
 
 
@@ -7510,14 +7557,14 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "[test_value]";
+    String expectedValue = "[test_value]";
     assertEquals(expectedValue, newValue);
   }
 
   @Test
   public void testYamlMultiValueWithSingleQuoteFlowStyleFormatMultiValue() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
     String originalValue = "test_value1,test_value2";
 
 
@@ -7525,14 +7572,14 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "['test_value1','test_value2']";
+    String expectedValue = "['test_value1','test_value2']";
     assertEquals(expectedValue, newValue);
   }
 
   @Test
   public void testYamlMultiValueWithPlainFlowStyleFormatMultiValue() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
     String originalValue = "test_value1,test_value2";
 
 
@@ -7540,14 +7587,14 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "[test_value1,test_value2]";
+    String expectedValue = "[test_value1,test_value2]";
     assertEquals(expectedValue, newValue);
   }
 
   @Test
   public void testYamlMultiValueWithSingleQuoteFlowStyleFormatSingleValueInSquareBrackets() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
     String originalValue = "['test_value']";
 
 
@@ -7555,7 +7602,7 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "['test_value']";
+    String expectedValue = "['test_value']";
     assertEquals(expectedValue, newValue);
   }
 
@@ -7563,7 +7610,7 @@
   @Test
   public void testYamlMultiValueFormatWithPlainFlowStyleSingleValueInSquareBrackets() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
     String originalValue = "[test_value]";
 
 
@@ -7571,7 +7618,7 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "[test_value]";
+    String expectedValue = "[test_value]";
     assertEquals(expectedValue, newValue);
   }
 
@@ -7579,7 +7626,7 @@
   @Test
   public void testYamlMultiValueWithSingleQuoteFlowStyleFormatMultiValueInSquareBrackets() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null);
     String originalValue = "['test_value1','test_value2']";
 
 
@@ -7587,14 +7634,14 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "['test_value1','test_value2']";
+    String expectedValue = "['test_value1','test_value2']";
     assertEquals(expectedValue, newValue);
   }
 
   @Test
   public void testYamlMultiValueWithPlainFlowStyleFormatMultiValueInSquareBrackets() throws Exception {
     // Given
-    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator  yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
+    BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator yamlMultiValuePropertyDecorator = new BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator(null, BlueprintConfigurationProcessor.YamlMultiValuePropertyDecorator.FlowStyle.PLAIN);
     String originalValue = "[test_value1,test_value2]";
 
 
@@ -7602,7 +7649,7 @@
     String newValue = yamlMultiValuePropertyDecorator.doFormat(originalValue);
 
     // Then
-    String expectedValue =  "[test_value1,test_value2]";
+    String expectedValue = "[test_value1,test_value2]";
     assertEquals(expectedValue, newValue);
   }
 
@@ -7626,7 +7673,6 @@
   }
 
 
-
   @Test
   public void testMultipleHostTopologyUpdaterWithYamlPropertyMultiHostValue() throws Exception {
     // Given
@@ -7769,7 +7815,7 @@
 
     assertEquals(expectedHostNameHawqMaster, hawqSite.get("hawq_master_address_host"));
     assertFalse("hawq_standby_address_host should have been filtered out of this non-HAWQ HA configuration",
-            hawqSite.containsKey("hawq_standby_address_host"));
+      hawqSite.containsKey("hawq_standby_address_host"));
   }
 
   @Test
@@ -7938,37 +7984,37 @@
   }
 
   @Test
-  public void testStackPasswordPropertyFilter() throws Exception{
-	Map<String, Map<String, String>> properties = new HashMap<>();
-	Map<String, String> rangerAdminSiteProps = new HashMap<>();
-	rangerAdminSiteProps.put("ranger.service.https.attrib.keystore.pass", "SECRET:admin-prp:1:ranger.service.pass");
-	properties.put("ranger-admin-site", rangerAdminSiteProps);
-	Map<String, Map<String, String>> parentProperties = new HashMap<>();
-	Configuration parentClusterConfig = new Configuration(parentProperties,
-	Collections.<String, Map<String, Map<String, String>>>emptyMap());
+  public void testStackPasswordPropertyFilter() throws Exception {
+    Map<String, Map<String, String>> properties = new HashMap<>();
+    Map<String, String> rangerAdminSiteProps = new HashMap<>();
+    rangerAdminSiteProps.put("ranger.service.https.attrib.keystore.pass", "SECRET:admin-prp:1:ranger.service.pass");
+    properties.put("ranger-admin-site", rangerAdminSiteProps);
+    Map<String, Map<String, String>> parentProperties = new HashMap<>();
+    Configuration parentClusterConfig = new Configuration(parentProperties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-	Configuration clusterConfig = new Configuration(properties,
-	Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
-	Collection<String> hgComponents = new HashSet<>();
-	hgComponents.add("NAMENODE");
-        hgComponents.add("SECONDARY_NAMENODE");
-	hgComponents.add("RESOURCEMANAGER");
-	TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
+    Configuration clusterConfig = new Configuration(properties,
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+    Collection<String> hgComponents = new HashSet<>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
-	Collection<String> hgComponents2 = new HashSet<>();
-	hgComponents2.add("DATANODE");
-	hgComponents2.add("HDFS_CLIENT");
-	TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
-	Collection<TestHostGroup> hostGroups = new HashSet<>();
-	hostGroups.add(group1);
-	hostGroups.add(group2);
+    Collection<String> hgComponents2 = new HashSet<>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
+    Collection<TestHostGroup> hostGroups = new HashSet<>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
 
-	expect(stack.isPasswordProperty((String) anyObject(),(String) anyObject(),(String) anyObject())).andReturn(true).once();
-	ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-	BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
-	configProcessor.doUpdateForBlueprintExport();
+    expect(stack.isPasswordProperty((String) anyObject(), (String) anyObject(), (String) anyObject())).andReturn(true).once();
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
 
-	assertFalse(properties.get("ranger-admin-site").containsKey("ranger.service.https.attrib.keystore.pass"));
+    assertFalse(properties.get("ranger-admin-site").containsKey("ranger.service.https.attrib.keystore.pass"));
   }
 
   private Map<String, AdvisedConfiguration> createAdvisedConfigMap() {
@@ -8019,19 +8065,19 @@
     valueAttributesInfoHost.setType("host");
 
     propertyConfigs.put("test.directories", new Stack.ConfigProperty(
-            new StackConfigurationResponse(null,null,null,null,"hdfs-site",null,null,null,valueAttributesInfoDirs,null)));
+      new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoDirs, null)));
     propertyConfigs.put("test.password", new Stack.ConfigProperty(
-            new StackConfigurationResponse(null,null,null,null,"hdfs-site",null,Collections.singleton(PropertyInfo.PropertyType.PASSWORD),null,null,null)));
+      new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
     propertyConfigs.put("test.host", new Stack.ConfigProperty(
-            new StackConfigurationResponse(null,null,null,null,"hdfs-site",null,null,null,valueAttributesInfoHost,null)));
+      new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
     expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
 
     Map<String, Map<String, String>> parentProperties = new HashMap<>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
-            Collections.<String, Map<String, Map<String, String>>>emptyMap());
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
     Configuration clusterConfig = new Configuration(properties,
-            Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
+      Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig);
 
     Collection<String> hgComponents1 = new HashSet<>();
     TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1"));
@@ -8044,15 +8090,15 @@
     configProcessor.doUpdateForClusterCreate();
 
     assertEquals(" spaces at    the end should be deleted",
-            clusterConfig.getPropertyValue("hdfs-site", "test.spaces"));
+      clusterConfig.getPropertyValue("hdfs-site", "test.spaces"));
     assertEquals("/all/spaces,should/be,deleted",
-            clusterConfig.getPropertyValue("hdfs-site", "test.directories"));
-    assertEquals( "  stays,   same    ",
-            clusterConfig.getPropertyValue("hdfs-site", "test.password"));
+      clusterConfig.getPropertyValue("hdfs-site", "test.directories"));
+    assertEquals("  stays,   same    ",
+      clusterConfig.getPropertyValue("hdfs-site", "test.password"));
     assertEquals(" https://just.trims ".trim(),
-            clusterConfig.getPropertyValue("hdfs-site", "test.host"));
+      clusterConfig.getPropertyValue("hdfs-site", "test.host"));
     assertEquals(" ",
-            clusterConfig.getPropertyValue("hdfs-site", "test.single.space"));
+      clusterConfig.getPropertyValue("hdfs-site", "test.single.space"));
   }
 
   private static String createExportedAddress(String expectedPortNum, String expectedHostGroupName) {
@@ -8092,10 +8138,10 @@
 
   private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration configuration,
                                                 Collection<TestHostGroup> hostGroups)
-      throws InvalidTopologyException {
+    throws InvalidTopologyException {
 
 
-    replay(stack, serviceInfo, ambariContext);
+    replay(stack, serviceInfo, ambariContext, controller, kerberosHelper, kerberosDescriptor, clusters, cluster);
 
     Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
     Collection<String> allServices = new HashSet<>();
@@ -8114,7 +8160,7 @@
 
       //create host group which is set on topology
       allHostGroups.put(hostGroup.name, new HostGroupImpl(hostGroup.name, "test-bp", stack,
-          componentList, EMPTY_CONFIG, "1"));
+        componentList, EMPTY_CONFIG, "1"));
 
       hostGroupInfo.put(hostGroup.name, groupInfo);
 
@@ -8135,10 +8181,14 @@
 
     expect(bp.getHostGroups()).andReturn(allHostGroups).anyTimes();
 
-    replay(bp);
+    expect(topologyRequestMock.getClusterId()).andReturn(1L).anyTimes();
+    expect(topologyRequestMock.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(topologyRequestMock.getConfiguration()).andReturn(configuration).anyTimes();
+    expect(topologyRequestMock.getHostGroupInfo()).andReturn(hostGroupInfo).anyTimes();
 
-    ClusterTopology topology = new ClusterTopologyImpl
-      (ambariContext, 1L, blueprint, configuration, hostGroupInfo);
+    replay(bp, topologyRequestMock);
+
+    ClusterTopology topology = new ClusterTopologyImpl(ambariContext, topologyRequestMock);
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY);
 
     return topology;
@@ -8155,7 +8205,7 @@
       this.components = components;
       this.hosts = hosts;
       this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
-          Collections.<String, Map<String, Map<String, String>>>emptyMap());
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
     }
 
     public TestHostGroup(String name, Collection<String> components, Collection<String> hosts, Configuration configuration) {
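
A note on the createClusterTopology() change above: ClusterTopologyImpl is now built from a TopologyRequest rather than from positional blueprint/configuration/host-group arguments, so the four request getters must be stubbed before replay(). A minimal sketch of the pattern, assuming EasyMock and the test's existing ambariContext mock; the helper name createTopologyFromRequest is illustrative, not part of the patch:

    private ClusterTopology createTopologyFromRequest(Blueprint blueprint,
                                                      Configuration configuration,
                                                      Map<String, HostGroupInfo> hostGroupInfo)
        throws InvalidTopologyException {
      // The two-argument constructor reads cluster id, blueprint, configuration
      // and host group info from the request, so each getter is stubbed here.
      TopologyRequest request = createNiceMock(TopologyRequest.class);
      expect(request.getClusterId()).andReturn(1L).anyTimes();
      expect(request.getBlueprint()).andReturn(blueprint).anyTimes();
      expect(request.getConfiguration()).andReturn(configuration).anyTimes();
      expect(request.getHostGroupInfo()).andReturn(hostGroupInfo).anyTimes();
      replay(request);

      return new ClusterTopologyImpl(ambariContext, request);
    }
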
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
index c399a4c..a0ec67f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
@@ -743,7 +743,7 @@
     private final List<HostRoleCommand> hostRoleCommands = new LinkedList<>();
 
     private TestStage() {
-      super(1L, "", "", 1L, "", "", "", "", hostRoleCommandFactory, ecwFactory);
+      super(1L, "", "", 1L, "", "", "", hostRoleCommandFactory, ecwFactory);
     }
 
     void setHostRoleCommands(Collection<HostRoleCommandEntity> tasks) {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index f890e17..eaf54c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -19,111 +19,112 @@
 package org.apache.ambari.server.controller.internal;
 
- import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyLong;
- import static org.easymock.EasyMock.anyObject;
- import static org.easymock.EasyMock.capture;
- import static org.easymock.EasyMock.createMock;
- import static org.easymock.EasyMock.createNiceMock;
- import static org.easymock.EasyMock.eq;
- import static org.easymock.EasyMock.expect;
- import static org.easymock.EasyMock.expectLastCall;
- import static org.easymock.EasyMock.replay;
- import static org.easymock.EasyMock.verify;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
 
- import java.io.File;
- import java.io.FileInputStream;
- import java.lang.reflect.Field;
- import java.sql.SQLException;
- import java.util.ArrayList;
- import java.util.Arrays;
- import java.util.Collections;
- import java.util.HashMap;
- import java.util.LinkedHashMap;
- import java.util.LinkedHashSet;
- import java.util.List;
- import java.util.Map;
- import java.util.Properties;
- import java.util.Set;
+import java.io.File;
+import java.io.FileInputStream;
+import java.lang.reflect.Field;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
 
- import org.apache.ambari.annotations.Experimental;
- import org.apache.ambari.annotations.ExperimentalFeature;
- import org.apache.ambari.server.AmbariException;
- import org.apache.ambari.server.H2DatabaseCleaner;
- import org.apache.ambari.server.Role;
- import org.apache.ambari.server.actionmanager.ActionManager;
- import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
- import org.apache.ambari.server.actionmanager.HostRoleCommand;
- import org.apache.ambari.server.actionmanager.Stage;
- import org.apache.ambari.server.actionmanager.StageFactory;
- import org.apache.ambari.server.agent.CommandReport;
- import org.apache.ambari.server.agent.ExecutionCommand;
- import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
- import org.apache.ambari.server.api.services.AmbariMetaInfo;
- import org.apache.ambari.server.configuration.Configuration;
- import org.apache.ambari.server.controller.AmbariManagementController;
- import org.apache.ambari.server.controller.ExecuteActionRequest;
- import org.apache.ambari.server.controller.RequestStatusResponse;
- import org.apache.ambari.server.controller.ResourceProviderFactory;
- import org.apache.ambari.server.controller.spi.Request;
- import org.apache.ambari.server.controller.spi.RequestStatus;
- import org.apache.ambari.server.controller.spi.Resource;
- import org.apache.ambari.server.controller.spi.ResourceProvider;
- import org.apache.ambari.server.controller.utilities.PropertyHelper;
- import org.apache.ambari.server.orm.GuiceJpaInitializer;
- import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
- import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
- import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
- import org.apache.ambari.server.orm.dao.HostVersionDAO;
- import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
- import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
- import org.apache.ambari.server.orm.dao.StackDAO;
- import org.apache.ambari.server.orm.entities.ClusterEntity;
- import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
- import org.apache.ambari.server.orm.entities.HostVersionEntity;
- import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
- import org.apache.ambari.server.orm.entities.ResourceEntity;
- import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
- import org.apache.ambari.server.orm.entities.StackEntity;
- import org.apache.ambari.server.orm.entities.UpgradeEntity;
- import org.apache.ambari.server.security.TestAuthenticationFactory;
- import org.apache.ambari.server.security.authorization.AuthorizationException;
- import org.apache.ambari.server.security.authorization.ResourceType;
- import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
- import org.apache.ambari.server.state.Cluster;
- import org.apache.ambari.server.state.Clusters;
- import org.apache.ambari.server.state.ConfigHelper;
- import org.apache.ambari.server.state.Host;
- import org.apache.ambari.server.state.MaintenanceState;
- import org.apache.ambari.server.state.RepositoryType;
- import org.apache.ambari.server.state.RepositoryVersionState;
- import org.apache.ambari.server.state.Service;
- import org.apache.ambari.server.state.ServiceComponent;
- import org.apache.ambari.server.state.ServiceComponentHost;
- import org.apache.ambari.server.state.ServiceInfo;
- import org.apache.ambari.server.state.ServiceOsSpecific;
- import org.apache.ambari.server.state.StackId;
- import org.apache.ambari.server.state.cluster.ClusterImpl;
- import org.apache.ambari.server.state.stack.upgrade.Direction;
- import org.apache.ambari.server.topology.TopologyManager;
- import org.apache.ambari.server.utils.StageUtils;
- import org.apache.commons.io.IOUtils;
- import org.easymock.Capture;
- import org.easymock.EasyMock;
- import org.easymock.IAnswer;
- import org.junit.After;
- import org.junit.Assert;
- import org.junit.Before;
- import org.junit.Ignore;
- import org.junit.Test;
- import org.springframework.security.core.Authentication;
- import org.springframework.security.core.context.SecurityContextHolder;
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.H2DatabaseCleaner;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExecuteActionRequest;
+import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.ResourceProviderFactory;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
+import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.security.TestAuthenticationFactory;
+import org.apache.ambari.server.security.authorization.AuthorizationException;
+import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.ServiceOsSpecific;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.IAnswer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.context.SecurityContextHolder;
 
- import com.google.gson.JsonArray;
- import com.google.gson.JsonObject;
- import com.google.gson.JsonParser;
- import com.google.inject.AbstractModule;
- import com.google.inject.Guice;
- import com.google.inject.Injector;
- import com.google.inject.util.Modules;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.util.Modules;
 
 
  /**
@@ -222,6 +223,9 @@
     repoVersion.setId(1l);
     repoVersion.setOperatingSystems(OS_JSON);
 
+    final String hostWithoutVersionableComponents = "host2";
+
+    List<Host> hostsNeedingInstallCommands = new ArrayList<>();
     Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
@@ -236,6 +240,10 @@
 
       replay(host);
       hostsForCluster.put(hostname, host);
+
+      if (!StringUtils.equals(hostWithoutVersionableComponents, hostname)) {
+        hostsNeedingInstallCommands.add(host);
+      }
     }
 
     final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
@@ -300,7 +308,7 @@
       @Override
       public List<ServiceComponentHost> answer() throws Throwable {
         String hostname = (String) EasyMock.getCurrentArguments()[0];
-        if (hostname.equals("host2")) {
+        if (hostname.equals(hostWithoutVersionableComponents)) {
           return schsH2;
         } else {
           return schsH1;
@@ -308,6 +316,10 @@
       }
     }).anyTimes();
 
+    expect(cluster.transitionHostsToInstalling(anyObject(ClusterVersionEntity.class),
+        anyObject(RepositoryVersionEntity.class), anyObject(VersionDefinitionXml.class),
+        eq(false))).andReturn(hostsNeedingInstallCommands).atLeastOnce();
+
     ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
     ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
 
@@ -325,7 +337,7 @@
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -392,7 +404,6 @@
   }
 
   @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
-  @Ignore
   public void testCreateResourcesForPatch() throws Exception {
     Resource.Type type = Resource.Type.ClusterStackVersion;
 
@@ -544,7 +555,7 @@
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -645,7 +656,8 @@
 
     ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
 
-
+    final String hostWithoutVersionableComponents = "host3";
+    List<Host> hostsNeedingInstallCommands = new ArrayList<>();
     Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
@@ -660,6 +672,10 @@
 
       replay(host);
       hostsForCluster.put(hostname, host);
+
+      if (!StringUtils.equals(hostWithoutVersionableComponents, hostname)) {
+        hostsNeedingInstallCommands.add(host);
+      }
     }
 
     Service hdfsService = createNiceMock(Service.class);
@@ -736,7 +753,7 @@
       @Override
       public List<ServiceComponentHost> answer() throws Throwable {
         String hostname = (String) EasyMock.getCurrentArguments()[0];
-        if (hostname.equals("host2")) {
+            if (hostname.equals("host2")) {
           return schsH2;
         } else if (hostname.equals("host3")) {
           return schsH3;
@@ -746,6 +763,10 @@
       }
     }).anyTimes();
 
+    expect(cluster.transitionHostsToInstalling(anyObject(ClusterVersionEntity.class),
+        anyObject(RepositoryVersionEntity.class), anyObject(VersionDefinitionXml.class),
+        eq(false))).andReturn(hostsNeedingInstallCommands).atLeastOnce();
+
 //    ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
     ExecutionCommand executionCommand = new ExecutionCommand();
     ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
@@ -763,7 +784,7 @@
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -878,6 +899,9 @@
 
     ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
 
+    final String hostWithoutVersionableComponents = "host3";
+    List<Host> hostsNeedingInstallCommands = new ArrayList<>();
+
     Map<String, Host> hostsForCluster = new HashMap<>();
     int hostCount = 10;
     for (int i = 0; i < hostCount; i++) {
@@ -892,6 +916,10 @@
 
       replay(host);
       hostsForCluster.put(hostname, host);
+
+      if (!StringUtils.equals(hostWithoutVersionableComponents, hostname)) {
+        hostsNeedingInstallCommands.add(host);
+      }
     }
 
     Service hdfsService = createNiceMock(Service.class);
@@ -978,6 +1006,10 @@
       }
     }).anyTimes();
 
+    expect(cluster.transitionHostsToInstalling(anyObject(ClusterVersionEntity.class),
+        anyObject(RepositoryVersionEntity.class), anyObject(VersionDefinitionXml.class),
+        eq(false))).andReturn(hostsNeedingInstallCommands).atLeastOnce();
+
 //    ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
     ExecutionCommand executionCommand = new ExecutionCommand();
     ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
@@ -995,14 +1027,12 @@
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
             anyObject(String.class), anyLong(),
-            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class), anyObject(String.class),
             anyObject(String.class))).andReturn(stage).
             times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
-    expect(
-            repositoryVersionDAOMock.findByStackAndVersion(
-                    anyObject(StackId.class),
-                    anyObject(String.class))).andReturn(repoVersion);
+    expect(repositoryVersionDAOMock.findByStackAndVersion(anyObject(StackId.class),
+        anyObject(String.class))).andReturn(repoVersion);
 
     Capture<org.apache.ambari.server.actionmanager.Request> c = Capture.newInstance();
     Capture<ExecuteActionRequest> ear = Capture.newInstance();
@@ -1527,7 +1557,7 @@
     // Check that we create proper stage count
     expect(stageFactory.createNew(anyLong(), anyObject(String.class),
         anyObject(String.class), anyLong(),
-        anyObject(String.class), anyObject(String.class), anyObject(String.class),
+        anyObject(String.class), anyObject(String.class),
         anyObject(String.class))).andReturn(stage).
         times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
 
@@ -1637,7 +1667,9 @@
     repoVersionEntity.setVersionXml(IOUtils.toString(new FileInputStream(f)));
     repoVersionEntity.setVersionXsd("version_definition.xsd");
     repoVersionEntity.setType(RepositoryType.STANDARD);
+    repoVersionEntity.setVersion(repoVersion);
 
+    List<Host> hostsNeedingInstallCommands = new ArrayList<>();
     Map<String, Host> hostsForCluster = new HashMap<>();
     List<HostVersionEntity> hostVersionEntitiesMergedWithNotRequired = new ArrayList<>();
     int hostCount = 10;
@@ -1653,6 +1685,7 @@
       // transition correct into the not required state
       if (i < hostCount - 2) {
         expect(host.hasComponentsAdvertisingVersions(eq(stackId))).andReturn(true).atLeastOnce();
+        hostsNeedingInstallCommands.add(host);
       } else {
         expect(host.hasComponentsAdvertisingVersions(eq(stackId))).andReturn(false).atLeastOnce();
 
@@ -1663,7 +1696,8 @@
         replay(hostVersionEntity);
 
         hostVersionEntitiesMergedWithNotRequired.add(hostVersionEntity);
-        expect(host.getAllHostVersions()).andReturn(hostVersionEntitiesMergedWithNotRequired).anyTimes();
+        expect(host.getAllHostVersions()).andReturn(
+            hostVersionEntitiesMergedWithNotRequired).anyTimes();
       }
 
       replay(host);
@@ -1736,15 +1770,15 @@
     // then return the real one it's going to use
     expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
         anyObject(StackId.class), anyObject(String.class))).andReturn(null).once();
-    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
-        anyObject(StackId.class), anyObject(String.class))).andReturn(cve).once();
+
+    expect(cluster.createClusterVersion(anyObject(StackId.class), eq(repoVersion),
+        EasyMock.anyString(), eq(RepositoryVersionState.INSTALLED))).andReturn(cve).once();
 
     // now the important expectations - that the cluster transition methods were
     // called correctly
-    cluster.transitionHosts(cve, RepositoryVersionState.INSTALLED);
-    for (HostVersionEntity hostVersionEntity : hostVersionEntitiesMergedWithNotRequired) {
-      expect(hostVersionDAO.merge(hostVersionEntity)).andReturn(hostVersionEntity).once();
-    }
+    expect(cluster.transitionHostsToInstalling(cve, repoVersionEntity,
+        repoVersionEntity.getRepositoryXml(), true)).andReturn(
+            hostsNeedingInstallCommands).once();
 
     // replay
     replay(managementController, response, clusters, hdfsService, resourceProviderFactory,
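
The recurring expectation added throughout this file captures the provider's new contract: Cluster.transitionHostsToInstalling(...) both transitions host versions and returns the subset of hosts that still need an install command, replacing the old transitionHosts(...) call and the per-host hostVersionDAO.merge(...) expectations. A condensed sketch of the pattern, using the names from the tests above (the host-name construction and loop bounds are illustrative):

    List<Host> hostsNeedingInstallCommands = new ArrayList<>();
    for (int i = 0; i < hostCount; i++) {
      String hostname = "host" + i;
      // Hosts without versionable components are still transitioned but
      // receive no install command, so they are left out of the expected
      // return value.
      if (!StringUtils.equals(hostWithoutVersionableComponents, hostname)) {
        hostsNeedingInstallCommands.add(hostsForCluster.get(hostname));
      }
    }

    expect(cluster.transitionHostsToInstalling(anyObject(ClusterVersionEntity.class),
        anyObject(RepositoryVersionEntity.class), anyObject(VersionDefinitionXml.class),
        eq(false))).andReturn(hostsNeedingInstallCommands).atLeastOnce();
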
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
index 7c34521..d1705d8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
@@ -68,6 +68,7 @@
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentFactory;
@@ -245,13 +246,13 @@
 
     expect(serviceComponent1.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component100", null, "", serviceComponentStateCountMap,
-              true /* recovery enabled */, "Component100 Client"));
+              true /* recovery enabled */, "Component100 Client", null, null));
     expect(serviceComponent2.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", serviceComponentStateCountMap,
-              false /* recovery not enabled */, "Component101 Client"));
+              false /* recovery not enabled */, "Component101 Client", null, null));
     expect(serviceComponent3.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", serviceComponentStateCountMap,
-              true /* recovery enabled */, "Component102 Client"));
+              true /* recovery enabled */, "Component102 Client", "1.1", RepositoryVersionState.CURRENT));
 
     expect(ambariMetaInfo.getComponent(null, null, null, "Component100")).andReturn(componentInfo1);
     expect(ambariMetaInfo.getComponent(null, null, null, "Component101")).andReturn(componentInfo2);
@@ -284,6 +285,8 @@
     propertyIds.add(ComponentResourceProvider.COMPONENT_INIT_COUNT_PROPERTY_ID);
     propertyIds.add(ComponentResourceProvider.COMPONENT_UNKNOWN_COUNT_PROPERTY_ID);
     propertyIds.add(ComponentResourceProvider.COMPONENT_RECOVERY_ENABLED_ID);
+    propertyIds.add(ComponentResourceProvider.COMPONENT_DESIRED_VERSION);
+    propertyIds.add(ComponentResourceProvider.COMPONENT_REPOSITORY_STATE);
 
     Predicate predicate = new PredicateBuilder()
       .property(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID)
@@ -316,6 +319,17 @@
           ComponentResourceProvider.COMPONENT_UNKNOWN_COUNT_PROPERTY_ID));
       Assert.assertEquals(String.valueOf(true), resource.getPropertyValue(
         ComponentResourceProvider.COMPONENT_RECOVERY_ENABLED_ID));
+
+      if (resource.getPropertyValue(
+          ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component102")) {
+        Assert.assertNotNull(resource.getPropertyValue(ComponentResourceProvider.COMPONENT_REPOSITORY_STATE));
+        Assert.assertNotNull(resource.getPropertyValue(ComponentResourceProvider.COMPONENT_DESIRED_VERSION));
+        Assert.assertEquals(RepositoryVersionState.CURRENT, resource.getPropertyValue(ComponentResourceProvider.COMPONENT_REPOSITORY_STATE));
+        Assert.assertEquals("1.1", resource.getPropertyValue(ComponentResourceProvider.COMPONENT_DESIRED_VERSION));
+      } else {
+        Assert.assertNull(resource.getPropertyValue(ComponentResourceProvider.COMPONENT_REPOSITORY_STATE));
+        Assert.assertNull(resource.getPropertyValue(ComponentResourceProvider.COMPONENT_DESIRED_VERSION));
+      }
     }
 
     // verify
@@ -407,13 +421,13 @@
 
     expect(serviceComponent1.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", serviceComponentStateCountMap,
-              false /* recovery not enabled */, "Component101 Client"));
+              false /* recovery not enabled */, "Component101 Client", null, null));
     expect(serviceComponent2.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, "", serviceComponentStateCountMap,
-              false /* recovery not enabled */, "Component102 Client"));
+              false /* recovery not enabled */, "Component102 Client", null, null));
     expect(serviceComponent3.convertToResponse()).andReturn(
       new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component103", null, "", serviceComponentStateCountMap,
-              false /* recovery not enabled */, "Component103 Client"));
+              false /* recovery not enabled */, "Component103 Client", null, null));
     expect(serviceComponent1.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
     expect(serviceComponent2.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
     expect(serviceComponent3.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
@@ -723,7 +737,7 @@
 
     expect(serviceComponent1.convertToResponse()).andReturn(
         new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, "", serviceComponentStateCountMap,
-            false /* recovery not enabled */, "Component101 Client"));
+            false /* recovery not enabled */, "Component101 Client", null, null));
     expect(serviceComponent1.getDesiredState()).andReturn(State.INSTALLED).anyTimes();
 
     expect(serviceComponentHost.getState()).andReturn(State.INSTALLED).anyTimes();
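
For reference, ServiceComponentResponse gained two trailing constructor arguments in this change, the component's desired repository version and its aggregate RepositoryVersionState, surfaced through the new COMPONENT_DESIRED_VERSION and COMPONENT_REPOSITORY_STATE property IDs. A hedged sketch of the two cases the assertions above distinguish, with argument values taken from the test:

    // A component that advertises a version reports both new fields.
    ServiceComponentResponse versioned = new ServiceComponentResponse(
        100L, "Cluster100", "Service100", "Component102", null, "",
        serviceComponentStateCountMap, true /* recovery enabled */,
        "Component102 Client", "1.1", RepositoryVersionState.CURRENT);

    // A component with nothing to report passes null for both, and the
    // provider is expected to surface null property values in turn.
    ServiceComponentResponse unversioned = new ServiceComponentResponse(
        100L, "Cluster100", "Service100", "Component100", null, "",
        serviceComponentStateCountMap, true /* recovery enabled */,
        "Component100 Client", null, null);
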
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
index 01dd8e3..78752dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
@@ -1127,6 +1127,8 @@
     HostResponse response = createNiceMock(HostResponse.class);
 
     Set<Cluster> setCluster = Collections.singleton(cluster);
+    Map<String, DesiredConfig> desiredConfigs = new HashMap<>();
+    Map<String, HostConfig> desiredHostConfigs = new HashMap<>();
 
     // requests
     HostRequest request1 = new HostRequest("host1", "cluster1");
@@ -1142,7 +1144,8 @@
     expect(clusters.getClustersForHost("host1")).andReturn(setCluster);
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
     expect(cluster.getClusterId()).andReturn(2L).anyTimes();
-    expect(cluster.getDesiredConfigs()).andReturn(new HashMap<String, DesiredConfig>()).anyTimes();
+    expect(cluster.getDesiredConfigs()).andReturn(desiredConfigs);
+    expect(host.getDesiredHostConfigs(cluster, desiredConfigs)).andReturn(desiredHostConfigs);
     expect(host.getHostName()).andReturn("host1").anyTimes();
     expect(host.convertToResponse()).andReturn(response);
     response.setClusterName("cluster1");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
index 055e1b5..02bfd2b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
@@ -50,8 +50,6 @@
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.TopologyRequest;
-import org.apache.ambari.server.topology.TopologyValidator;
-import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -112,7 +110,6 @@
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -164,7 +161,6 @@
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group2
     HostGroupInfo group2Info = hostGroupInfo.get("group2");
@@ -216,7 +212,6 @@
     assertSame(blueprint, provisionClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = provisionClusterRequest.getHostGroupInfo();
     assertEquals(2, hostGroupInfo.size());
-    assertEquals(3, provisionClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -367,33 +362,6 @@
     new ProvisionClusterRequest(properties, null);
   }
 
-  @Test
-  public void testGetValidators_noDefaultPassword() throws Exception {
-    Map<String, Object> properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME);
-    //properties.put("default_password", "pwd");
-    TopologyRequest request = new ProvisionClusterRequest(properties, null);
-    List<TopologyValidator> validators = request.getTopologyValidators();
-
-    assertEquals(3, validators.size());
-    TopologyValidator pwdValidator = validators.get(0);
-
-    TopologyValidator noDefaultPwdValidator = new RequiredPasswordValidator(null);
-    assertEquals(pwdValidator, noDefaultPwdValidator);
-  }
-
-  @Test
-  public void testGetValidators_defaultPassword() throws Exception {
-    Map<String, Object> properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME);
-    properties.put("default_password", "pwd");
-    TopologyRequest request = new ProvisionClusterRequest(properties, null);
-    List<TopologyValidator> validators = request.getTopologyValidators();
-
-    assertEquals(3, validators.size());
-    TopologyValidator pwdValidator = validators.get(0);
-
-    TopologyValidator defaultPwdValidator = new RequiredPasswordValidator("pwd");
-    assertEquals(pwdValidator, defaultPwdValidator);
-  }
 
   @Test(expected = InvalidTopologyTemplateException.class)
   public void testInvalidPredicateProperty() throws Exception {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
index fb508ea..4aacf91 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
@@ -128,8 +128,8 @@
     Assert.assertTrue(validPropertyIds.contains("ServiceComponentInfo/component_name"));
     Assert.assertTrue(validPropertyIds.contains("ServiceComponentInfo/cluster_name"));
     Assert.assertTrue(validPropertyIds.contains("ServiceComponentInfo/state"));
-    Assert.assertTrue(validPropertyIds.contains("ServiceComponents/display_name"));
-    Assert.assertTrue(validPropertyIds.contains("ServiceComponents/description"));
+    Assert.assertTrue(validPropertyIds.contains("ServiceComponentInfo/display_name"));
+    Assert.assertTrue(validPropertyIds.contains("ServiceComponentInfo/description"));
     Assert.assertTrue(validPropertyIds.contains("params/run_smoke_test"));
 
     request = PropertyHelper.getReadRequest(PropertyHelper.getPropertyIds(Resource.Type.Action));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index dc9e5ed..6bc856d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -78,10 +78,12 @@
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.topology.TopologyRequest;
+import org.apache.ambari.server.utils.SecretReference;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.After;
@@ -97,6 +99,8 @@
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
 
 /**
  * RequestResourceProvider tests.
@@ -141,6 +145,10 @@
     field.setAccessible(true);
     field.set(null, topologyManager);
 
+    field = SecretReference.class.getDeclaredField("gson");
+    field.setAccessible(true);
+    field.set(null, new Gson());
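+    // SecretReference keeps a static Gson reference; supply one reflectively here,
+    // since this test skips the injection that would normally populate it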
+
     AuthorizationHelperInitializer.viewInstanceDAOReturningNull();
   }
 
@@ -257,12 +265,38 @@
   public void testGetResources() throws Exception {
     Resource.Type type = Resource.Type.Request;
 
+    String storedInputs = "{" +
+        " \"hosts\": \"host1\"," +
+        " \"check_execute_list\": \"last_agent_env_check,installed_packages,existing_repos,transparentHugePage\"," +
+        " \"jdk_location\": \"http://ambari_server.home:8080/resources/\"," +
+        " \"threshold\": \"20\"," +
+        " \"password\": \"for your eyes only\"," +
+        " \"foo_password\": \"for your eyes only\"," +
+        " \"passwd\": \"for your eyes only\"," +
+        " \"foo_passwd\": \"for your eyes only\"" +
+        " }";
+    String cleanedInputs = SecretReference.maskPasswordInPropertyMap(storedInputs);
+
+    // Make sure SecretReference.maskPasswordInPropertyMap properly masked the password fields in cleanedInputs...
+    Gson gson = new Gson();
+    Map<String, String> map = gson.fromJson(cleanedInputs, new TypeToken<Map<String, String>>() {}.getType());
+    for (Map.Entry<String, String> entry : map.entrySet()) {
+      String name = entry.getKey();
+      if (name.contains("password") || name.contains("passwd")) {
+        Assert.assertEquals("SECRET", entry.getValue());
+      } else {
+        Assert.assertFalse("SECRET".equals(entry.getValue()));
+      }
+    }
+
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     RequestEntity requestMock = createNiceMock(RequestEntity.class);
 
     expect(requestMock.getRequestContext()).andReturn("this is a context").anyTimes();
     expect(requestMock.getRequestId()).andReturn(100L).anyTimes();
+    expect(requestMock.getInputs()).andReturn(storedInputs).anyTimes();
 
     Capture<Collection<Long>> requestIdsCapture = newCapture();
 
@@ -286,6 +320,7 @@
 
     propertyIds.add(RequestResourceProvider.REQUEST_ID_PROPERTY_ID);
     propertyIds.add(RequestResourceProvider.REQUEST_STATUS_PROPERTY_ID);
+    propertyIds.add(RequestResourceProvider.REQUEST_INPUTS_ID);
 
     Predicate predicate = new PredicateBuilder().property(RequestResourceProvider.REQUEST_ID_PROPERTY_ID).equals("100").
       toPredicate();
@@ -296,6 +331,7 @@
     for (Resource resource : resources) {
       Assert.assertEquals(100L, (long) (Long) resource.getPropertyValue(RequestResourceProvider.REQUEST_ID_PROPERTY_ID));
       Assert.assertEquals("IN_PROGRESS", resource.getPropertyValue(RequestResourceProvider.REQUEST_STATUS_PROPERTY_ID));
+      Assert.assertEquals(cleanedInputs, resource.getPropertyValue(RequestResourceProvider.REQUEST_INPUTS_ID));
     }
 
     // verify
@@ -1649,7 +1685,12 @@
 
 
     ClusterTopology topology = createNiceMock(ClusterTopology.class);
+
+    HostGroup hostGroup = createNiceMock(HostGroup.class);
+    expect(hostGroup.getName()).andReturn("host_group_1").anyTimes();
+
     Blueprint blueprint = createNiceMock(Blueprint.class);
+    expect(blueprint.getHostGroup("host_group_1")).andReturn(hostGroup).anyTimes();
     expect(topology.getClusterId()).andReturn(2L).anyTimes();
 
     Long clusterId = 2L;
@@ -1666,8 +1707,13 @@
     expect(hrcDAO.findAggregateCounts((Long) anyObject())).andReturn(
       Collections.<Long, HostRoleCommandStatusSummaryDTO>emptyMap()).anyTimes();
 
+    Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
+    HostGroupInfo hostGroupInfo = new HostGroupInfo("host_group_1");
+    hostGroupInfo.setRequestedCount(1);
+    hostGroupInfoMap.put("host_group_1", hostGroupInfo);
+
     TopologyRequest topologyRequest = createNiceMock(TopologyRequest.class);
-    expect(topologyRequest.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
+    expect(topologyRequest.getHostGroupInfo()).andReturn(hostGroupInfoMap).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes();
 
@@ -1677,24 +1723,28 @@
     expect(AmbariServer.getController()).andReturn(managementController).anyTimes();
 
     PowerMock.replayAll(
-      topologyRequest,
-      topology,
-      blueprint,
-      managementController,
-      clusters);
+            topologyRequest,
+            topology,
+            blueprint,
+            managementController,
+            clusters);
 
 
-    LogicalRequest logicalRequest = new LogicalRequest(200L, topologyRequest, topology);
+    LogicalRequest logicalRequest = createNiceMock(LogicalRequest.class);
+    expect(logicalRequest.hasPendingHostRequests()).andReturn(true).anyTimes();
+    expect(logicalRequest.constructNewPersistenceEntity()).andReturn(requestMock).anyTimes();
 
     reset(topologyManager);
 
     expect(topologyManager.getRequest(100L)).andReturn(logicalRequest).anyTimes();
+
     expect(topologyManager.getRequests(eq(Collections.singletonList(100L)))).andReturn(
       Collections.singletonList(logicalRequest)).anyTimes();
     expect(topologyManager.getStageSummaries(EasyMock.<Long>anyObject())).andReturn(
       Collections.<Long, HostRoleCommandStatusSummaryDTO>emptyMap()).anyTimes();
 
-    replay(actionManager, requestMock, requestDAO, hrcDAO, topologyManager);
+    replay(actionManager, requestMock, requestDAO, hrcDAO, topologyManager, logicalRequest);
 
     ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
       type,
@@ -1722,7 +1772,7 @@
 
     // verify
     PowerMock.verifyAll();
-    verify(actionManager, requestMock, requestDAO, hrcDAO, topologyManager);
+    verify(actionManager, requestMock, requestDAO, hrcDAO, topologyManager, logicalRequest);
 
     Assert.assertEquals(1, resources.size());
     for (Resource resource : resources) {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
index f60915c..c001ab0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestStageContainerTest.java
@@ -143,7 +143,7 @@
     stages.add(stage2);
 
     //expectations
-    expect(requestFactory.createNewFromStages(stages)).andReturn(request);
+    expect(requestFactory.createNewFromStages(stages, "{}")).andReturn(request);
     expect(request.getStages()).andReturn(stages).anyTimes();
     actionManager.sendActions(request, null);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
index c5d17bd..4dee1c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
@@ -116,7 +116,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -147,7 +146,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group1
     // host info
@@ -176,7 +174,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group2
     // host info
@@ -203,7 +200,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group2
     // host info
@@ -226,7 +222,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(1, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group3
     // host info
@@ -253,7 +248,6 @@
     assertSame(blueprint, scaleClusterRequest.getBlueprint());
     Map<String, HostGroupInfo> hostGroupInfo = scaleClusterRequest.getHostGroupInfo();
     assertEquals(3, hostGroupInfo.size());
-    assertEquals(0, scaleClusterRequest.getTopologyValidators().size());
 
     // group
     // host info
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java
index 8a52b8a..2fafa5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java
@@ -125,6 +125,7 @@
     hostRoleCommandEntity.setRole(Role.DATANODE);
     hostRoleCommandEntity.setCustomCommandName("customCommandName");
     hostRoleCommandEntity.setCommandDetail("commandDetail");
+    hostRoleCommandEntity.setOpsDisplayName("opsDisplayName");
     entities.add(hostRoleCommandEntity);
 
     // set expectations
@@ -139,6 +140,7 @@
     propertyIds.add(TaskResourceProvider.TASK_ID_PROPERTY_ID);
     propertyIds.add(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID);
     propertyIds.add(TaskResourceProvider.TASK_COMMAND_DET_PROPERTY_ID);
+    propertyIds.add(TaskResourceProvider.TASK_COMMAND_OPS_DISPLAY_NAME);
 
     Predicate predicate = new PredicateBuilder().property(TaskResourceProvider.TASK_ID_PROPERTY_ID).equals("100").
                           and().property(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID).equals("100").toPredicate();
@@ -153,6 +155,8 @@
           .TASK_CUST_CMD_NAME_PROPERTY_ID));
       Assert.assertEquals("commandDetail", resource.getPropertyValue(TaskResourceProvider
           .TASK_COMMAND_DET_PROPERTY_ID));
+      Assert.assertEquals("opsDisplayName",resource.getPropertyValue(TaskResourceProvider
+          .TASK_COMMAND_OPS_DISPLAY_NAME));
     }
 
     // verify
@@ -188,6 +192,7 @@
     hostRoleCommandEntity.setRole(Role.DATANODE);
     hostRoleCommandEntity.setCustomCommandName("customCommandName");
     hostRoleCommandEntity.setCommandDetail("commandDetail");
+    hostRoleCommandEntity.setOpsDisplayName("opsDisplayName");
     commands.add(new HostRoleCommand(hostRoleCommandEntity, hostDAO, executionCommandDAO, ecwFactory));
 
     // set expectations
@@ -203,6 +208,7 @@
     propertyIds.add(TaskResourceProvider.TASK_ID_PROPERTY_ID);
     propertyIds.add(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID);
     propertyIds.add(TaskResourceProvider.TASK_COMMAND_DET_PROPERTY_ID);
+    propertyIds.add(TaskResourceProvider.TASK_COMMAND_OPS_DISPLAY_NAME);
 
     Predicate predicate = new PredicateBuilder().property(TaskResourceProvider.TASK_ID_PROPERTY_ID).equals("100").
       and().property(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID).equals("100").toPredicate();
@@ -218,6 +224,8 @@
         .TASK_CUST_CMD_NAME_PROPERTY_ID));
       Assert.assertEquals("commandDetail", resource.getPropertyValue(TaskResourceProvider
         .TASK_COMMAND_DET_PROPERTY_ID));
+      Assert.assertEquals("opsDisplayName",resource.getPropertyValue(TaskResourceProvider
+          .TASK_COMMAND_OPS_DISPLAY_NAME));
     }
 
     // verify
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 999b7a7..e587f28 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -42,9 +42,11 @@
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.audit.AuditLogger;
 import org.apache.ambari.server.configuration.Configuration;
@@ -160,9 +162,10 @@
 
     EasyMock.replay(configHelper);
 
+    InMemoryDefaultTestModule module = new InMemoryDefaultTestModule();
+
     // create an injector which will inject the mocks
-    injector = Guice.createInjector(Modules.override(
-        new InMemoryDefaultTestModule()).with(new MockModule()));
+    injector = Guice.createInjector(Modules.override(module).with(new MockModule()));
 
     H2DatabaseCleaner.resetSequences(injector);
     injector.getInstance(GuiceJpaInitializer.class);
@@ -250,9 +253,12 @@
     sch = component.addServiceComponentHost("h1");
     sch.setVersion("2.1.1.0");
 
+    Configuration configuration = injector.getInstance(Configuration.class);
+    configuration.setProperty("upgrade.parameter.zk-server.timeout", "824");
+
     topologyManager = injector.getInstance(TopologyManager.class);
     StageUtils.setTopologyManager(topologyManager);
-    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+    StageUtils.setConfiguration(configuration);
     ActionManager.setTopologyManager(topologyManager);
     EasyMock.replay(injector.getInstance(AuditLogger.class));
   }
@@ -1650,6 +1656,60 @@
         HostRoleStatus.IN_PROGRESS_STATUSES);
   }
 
+  @Test
+  public void testTimeouts() throws Exception {
+    Cluster cluster = clusters.getCluster("c1");
+
+    StackEntity stackEntity = stackDAO.find("HDP", "2.1.1");
+    RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity();
+    repoVersionEntity.setDisplayName("My New Version 3");
+    repoVersionEntity.setOperatingSystems("");
+    repoVersionEntity.setStack(stackEntity);
+    repoVersionEntity.setVersion("2.2.2.3");
+    repoVersionDao.create(repoVersionEntity);
+
+    Map<String, Object> requestProps = new HashMap<>();
+    requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.2.3");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name());
+
+    ResourceProvider upgradeResourceProvider = createProvider(amc);
+
+    Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null);
+    RequestStatus status = upgradeResourceProvider.createResources(request);
+
+    Set<Resource> createdResources = status.getAssociatedResources();
+    assertEquals(1, createdResources.size());
+    Resource res = createdResources.iterator().next();
+    Long id = (Long) res.getPropertyValue("Upgrade/request_id");
+    assertNotNull(id);
+    assertEquals(Long.valueOf(1), id);
+
+    ActionManager am = injector.getInstance(ActionManager.class);
+
+    List<HostRoleCommand> commands = am.getRequestTasks(id);
+
+    boolean found = false;
+
+    for (HostRoleCommand command : commands) {
+      ExecutionCommandWrapper wrapper = command.getExecutionCommandWrapper();
+
+      if (command.getRole().equals(Role.ZOOKEEPER_SERVER) && command.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND)) {
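+        // the timeout configured in setup ("824") should be propagated into the command params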
+        Map<String, String> commandParams = wrapper.getExecutionCommand().getCommandParams();
+        assertTrue(commandParams.containsKey(KeyNames.COMMAND_TIMEOUT));
+        assertEquals("824",commandParams.get(KeyNames.COMMAND_TIMEOUT));
+        found = true;
+      }
+    }
+
+    assertTrue("ZooKeeper timeout override was found", found);
+
+  }
+
   /**
    *
    */
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
index d60596b..3370173 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.controller.logging;
 
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
@@ -25,11 +27,15 @@
 import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
 import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
@@ -166,6 +172,161 @@
 
     assertTrue("Incorrect HostComponent set on request set",
                 retrievalService.getCurrentRequests().contains(expectedComponentName + "+" + expectedHostName));
+    assertEquals("Incorrect size for failure counts for components, should be 0",
+                 0, retrievalService.getComponentRequestFailureCounts().size());
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testGetLogFileNamesExistingFailuresLessThanThreshold() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+
+    Executor executorMock = mockSupport.createMock(Executor.class);
+
+    Injector injectorMock =
+      mockSupport.createMock(Injector.class);
+
+    Configuration configurationMock =
+      mockSupport.createMock(Configuration.class);
+
+    // expect the executor to be called to execute the LogSearch request
+    executorMock.execute(isA(LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable.class));
+    // executor should only be called once
+    expectLastCall().once();
+
+    expect(injectorMock.getInstance(LoggingRequestHelperFactory.class)).andReturn(helperFactoryMock);
+
+    expect(configurationMock.getLogSearchMetadataCacheExpireTimeout()).andReturn(1).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
+    retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
+    retrievalService.setInjector(injectorMock);
+    retrievalService.setConfiguration(configurationMock);
+    // call the initialization routine normally invoked by the Guava service framework
+    retrievalService.doStart();
+    retrievalService.setExecutor(executorMock);
+    // initialize the component-based failure count to a value > 0 but below the threshold (10)
+    retrievalService.getComponentRequestFailureCounts().put(expectedComponentName, new AtomicInteger(5));
+
+    assertEquals("Default request set should be empty", 0, retrievalService.getCurrentRequests().size());
+
+    Set<String> resultSet = retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+
+    assertNull("Inital query on the retrieval service should be null, since cache is empty by default", resultSet);
+    assertEquals("Incorrect number of entries in the current request set", 1, retrievalService.getCurrentRequests().size());
+
+    assertTrue("Incorrect HostComponent set on request set",
+      retrievalService.getCurrentRequests().contains(expectedComponentName + "+" + expectedHostName));
+    assertEquals("Incorrect size for failure counts for components, should be 0",
+      1, retrievalService.getComponentRequestFailureCounts().size());
+    assertEquals("Incorrect failure count for component",
+      5, retrievalService.getComponentRequestFailureCounts().get(expectedComponentName).get());
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testGetLogFileNamesExistingFailuresAtThreshold() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+
+    Executor executorMock = mockSupport.createMock(Executor.class);
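+    // note: no execute() expectation is registered; at the failure threshold the
+    // service should not submit a new LogSearch request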
+
+    Injector injectorMock =
+      mockSupport.createMock(Injector.class);
+
+    Configuration configurationMock =
+      mockSupport.createMock(Configuration.class);
+
+    expect(configurationMock.getLogSearchMetadataCacheExpireTimeout()).andReturn(1).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
+    retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
+    retrievalService.setInjector(injectorMock);
+    retrievalService.setConfiguration(configurationMock);
+    // call the initialization routine normally invoked by the Guava service framework
+    retrievalService.doStart();
+    retrievalService.setExecutor(executorMock);
+    // initialize the component-based failure count to a value at the threshold
+    retrievalService.getComponentRequestFailureCounts().put(expectedComponentName, new AtomicInteger(10));
+
+    assertEquals("Default request set should be empty", 0, retrievalService.getCurrentRequests().size());
+
+    Set<String> resultSet =
+      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+
+    assertNull("Inital query on the retrieval service should be null, since cache is empty by default", resultSet);
+    assertEquals("Incorrect number of entries in the current request set", 0, retrievalService.getCurrentRequests().size());
+
+    assertEquals("Incorrect size for failure counts for components, should be 0",
+      1, retrievalService.getComponentRequestFailureCounts().size());
+    assertEquals("Incorrect failure count for component",
+      10, retrievalService.getComponentRequestFailureCounts().get(expectedComponentName).get());
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testGetLogFileNamesExistingFailuresOverThreshold() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+
+    Executor executorMock = mockSupport.createMock(Executor.class);
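+    // note: no execute() expectation is registered; over the failure threshold the
+    // service should short-circuit and skip the LogSearch request entirely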
+
+    Injector injectorMock =
+      mockSupport.createMock(Injector.class);
+
+    Configuration configurationMock =
+      mockSupport.createMock(Configuration.class);
+
+    expect(configurationMock.getLogSearchMetadataCacheExpireTimeout()).andReturn(1).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
+    retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
+    retrievalService.setInjector(injectorMock);
+    retrievalService.setConfiguration(configurationMock);
+    // call the initialization routine normally invoked by the Guava service framework
+    retrievalService.doStart();
+    retrievalService.setExecutor(executorMock);
+    // initialize the component-based failure count to a value over the threshold
+    retrievalService.getComponentRequestFailureCounts().put(expectedComponentName, new AtomicInteger(20));
+
+    assertEquals("Default request set should be empty", 0, retrievalService.getCurrentRequests().size());
+
+    Set<String> resultSet =
+      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+
+    assertNull("Inital query on the retrieval service should be null, since cache is empty by default", resultSet);
+    assertEquals("Incorrect number of entries in the current request set", 0, retrievalService.getCurrentRequests().size());
+
+    assertEquals("Incorrect size for failure counts for components, should be 0",
+      1, retrievalService.getComponentRequestFailureCounts().size());
+    assertEquals("Incorrect failure count for component",
+      20, retrievalService.getComponentRequestFailureCounts().get(expectedComponentName).get());
 
     mockSupport.verifyAll();
   }
@@ -225,6 +386,7 @@
 
     Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
     Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+    Map<String, AtomicInteger> componentFailureCounts = mockSupport.createMock(Map.class);
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(Collections.singleton("/this/is/just/a/test/directory"));
@@ -237,7 +399,7 @@
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, componentFailureCounts, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
@@ -258,6 +420,7 @@
 
     Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
     Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+    Map<String, AtomicInteger> componentFailureCounts = mockSupport.createMock(Map.class);
 
     // return null to simulate an error during helper instance creation
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(null);
@@ -269,7 +432,7 @@
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, componentFailureCounts, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
@@ -283,6 +446,7 @@
     final String expectedComponentName = "DATANODE";
     final String expectedClusterName = "clusterone";
     final String expectedComponentAndHostName = expectedComponentName + "+" + expectedHostName;
+    final AtomicInteger testInteger = new AtomicInteger(0);
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
@@ -292,6 +456,9 @@
 
     Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
     Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+    Map<String, AtomicInteger> componentFailureCounts = mockSupport.createMock(Map.class);
+
+    Capture<AtomicInteger> captureFailureCount = EasyMock.newCapture();
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     // return null to simulate an error occurring during the LogSearch data request
@@ -299,14 +466,72 @@
     // expect that the completed request is removed from the current request set,
     // even in the event of a failure to obtain the LogSearch data
     expect(currentRequestsMock.remove(expectedComponentAndHostName)).andReturn(true).once();
+    // expect that the component failure map is initially empty
+    expect(componentFailureCounts.containsKey(expectedComponentName)).andReturn(false);
+    // expect that the component map is updated with a new count
+    expect(componentFailureCounts.put(eq(expectedComponentName), capture(captureFailureCount))).andReturn(new AtomicInteger(0));
+    // expect that the runnable will obtain and increment the failure count
+    expect(componentFailureCounts.get(expectedComponentName)).andReturn(testInteger);
 
     mockSupport.replayAll();
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, componentFailureCounts, controllerMock);
     loggingRunnable.run();
 
+    assertEquals("Initial count set by Runnable should be 0",
+                 0, captureFailureCount.getValue().get());
+    assertEquals("Failure count should have been incremented",
+                 1, testInteger.get());
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRunnableWithFailedCallNullResultExistingFailureCount() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+    final String expectedComponentAndHostName = expectedComponentName + "+" + expectedHostName;
+    final AtomicInteger testFailureCount = new AtomicInteger(2);
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
+
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+    Map<String, AtomicInteger> componentFailureCounts = mockSupport.createMock(Map.class);
+
+    expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
+    // return null to simulate an error occurring during the LogSearch data request
+    expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(null);
+    // expect that the completed request is removed from the current request set,
+    // even in the event of a failure to obtain the LogSearch data
+    expect(currentRequestsMock.remove(expectedComponentAndHostName)).andReturn(true).once();
+    // expect that the component failure map already contains a count for this component
+    expect(componentFailureCounts.containsKey(expectedComponentName)).andReturn(true);
+    // expect that the runnable will obtain and increment the existing failure count
+    expect(componentFailureCounts.get(expectedComponentName)).andReturn(testFailureCount);
+
+    mockSupport.replayAll();
+
+    assertEquals("Initial count should be 2",
+                 2, testFailureCount.get());
+
+    LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
+      new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
+        cacheMock, currentRequestsMock, helperFactoryMock, componentFailureCounts, controllerMock);
+    loggingRunnable.run();
+
+    assertEquals("Failure count should have been incremented",
+                 3, testFailureCount.get());
+
     mockSupport.verifyAll();
   }
 
@@ -317,6 +542,7 @@
     final String expectedComponentName = "DATANODE";
     final String expectedClusterName = "clusterone";
     final String expectedComponentAndHostName = expectedComponentName + "+" + expectedHostName;
+    final AtomicInteger testInteger = new AtomicInteger(0);
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
@@ -326,6 +552,9 @@
 
     Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
     Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+    Map<String, AtomicInteger> componentFailureCounts = mockSupport.createMock(Map.class);
+
+    Capture<AtomicInteger> captureFailureCount = EasyMock.newCapture();
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     // return null to simulate an error occurring during the LogSearch data request
@@ -333,14 +562,25 @@
     // expect that the completed request is removed from the current request set,
     // even in the event of a failure to obtain the LogSearch data
     expect(currentRequestsMock.remove(expectedComponentAndHostName)).andReturn(true).once();
+    // expect that the component failure map is initially empty
+    expect(componentFailureCounts.containsKey(expectedComponentName)).andReturn(false);
+    // expect that the component map is updated with a new count
+    expect(componentFailureCounts.put(eq(expectedComponentName), capture(captureFailureCount))).andReturn(new AtomicInteger(0));
+    // expect that the runnable will obtain and increment the failure count
+    expect(componentFailureCounts.get(expectedComponentName)).andReturn(testInteger);
 
     mockSupport.replayAll();
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, componentFailureCounts, controllerMock);
     loggingRunnable.run();
 
+    assertEquals("Initial count set by Runnable should be 0",
+      0, captureFailureCount.getValue().get());
+    assertEquals("Failure count should have been incremented",
+      1, testInteger.get());
+
     mockSupport.verifyAll();
   }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
index b24500f..1221518 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LoggingRequestHelperImplTest.java
@@ -154,6 +154,7 @@
     Capture<HttpURLConnection> captureURLConnectionForAuthentication = EasyMock.newCapture();
 
     expect(clusterMock.getDesiredConfigByType("logsearch-admin-json")).andReturn(adminPropertiesConfigMock).atLeastOnce();
+    expect(clusterMock.getClusterName()).andReturn("clusterone").atLeastOnce();
     expect(adminPropertiesConfigMock.getProperties()).andReturn(testConfigProperties).atLeastOnce();
     expect(networkConnectionMock.readQueryResponseFromServer(capture(captureURLConnection))).andReturn(new StringBuffer(TEST_JSON_INPUT_TWO_LIST_ENTRIES)).atLeastOnce();
 
@@ -181,6 +182,8 @@
       "http", httpURLConnection.getURL().getProtocol());
     assertEquals("URLConnection did not have the expected method set",
       "GET", httpURLConnection.getRequestMethod());
+    assertTrue("URLConnection's URL did not have the expected query parameter string",
+      httpURLConnection.getURL().getQuery().contains("clusters=clusterone"));
 
     assertSame("HttpUrlConnection instances passed into NetworkConnection mock should have been the same instance",
       httpURLConnection, captureURLConnectionForAuthentication.getValue());
@@ -335,6 +338,7 @@
     Capture<HttpURLConnection> captureURLConnectionForAuthentication = EasyMock.newCapture();
 
     expect(clusterMock.getDesiredConfigByType("logsearch-admin-json")).andReturn(adminPropertiesConfigMock).atLeastOnce();
+    expect(clusterMock.getClusterName()).andReturn("clusterone").atLeastOnce();
     expect(adminPropertiesConfigMock.getProperties()).andReturn(testConfigProperties).atLeastOnce();
     expect(networkConnectionMock.readQueryResponseFromServer(capture(captureURLConnection))).andReturn(new StringBuffer(TEST_JSON_INPUT_TWO_LIST_ENTRIES)).atLeastOnce();
 
@@ -362,6 +366,8 @@
       "http", httpURLConnection.getURL().getProtocol());
     assertEquals("URLConnection did not have the expected method set",
       "GET", httpURLConnection.getRequestMethod());
+    assertTrue("URLConnection's URL did not have the expected query parameter string",
+      httpURLConnection.getURL().getQuery().contains("clusters=clusterone"));
 
     assertSame("HttpUrlConnection instances passed into NetworkConnection mock should have been the same instance",
       httpURLConnection, captureURLConnectionForAuthentication.getValue());
@@ -417,6 +423,7 @@
     Capture<HttpURLConnection> captureURLConnectionForAuthentication = new Capture<>();
 
     expect(clusterMock.getDesiredConfigByType("logsearch-admin-json")).andReturn(adminPropertiesConfigMock).atLeastOnce();
+    expect(clusterMock.getClusterName()).andReturn("clusterone").atLeastOnce();
     expect(adminPropertiesConfigMock.getProperties()).andReturn(testConfigProperties).atLeastOnce();
     expect(networkConnectionMock.readQueryResponseFromServer(capture(captureURLConnection))).andReturn(new StringBuffer(TEST_JSON_INPUT_NULL_LOG_LIST)).atLeastOnce();
 
@@ -444,6 +451,8 @@
       "http", httpURLConnection.getURL().getProtocol());
     assertEquals("URLConnection did not have the expected method set",
       "GET", httpURLConnection.getRequestMethod());
+    assertTrue("URLConnection's URL did not have the expected query parameter string",
+      httpURLConnection.getURL().getQuery().contains("clusters=clusterone"));
 
     assertSame("HttpUrlConnection instances passed into NetworkConnection mock should have been the same instance",
       httpURLConnection, captureURLConnectionForAuthentication.getValue());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java b/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
index a09f037..b82941a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/credentialapi/CredentialUtilTest.java
@@ -28,7 +28,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ToolRunner;
-
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index 98510db..6fb0028 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -376,10 +376,6 @@
     Assert.assertEquals(AmbariEventType.CLUSTER_RENAME, ambariEvents.get(0).getType());
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/AlertMaintenanceModeListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/AlertMaintenanceModeListenerTest.java
index bdc662a..f1d774b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/AlertMaintenanceModeListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/AlertMaintenanceModeListenerTest.java
@@ -32,6 +32,7 @@
 import org.apache.ambari.server.orm.dao.AlertsDAO;
 import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
 import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
+import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -185,10 +186,12 @@
     EasyMock.expect(hostHistory.getHostName()).andReturn(HOSTNAME).atLeastOnce();
     EasyMock.expect(hostHistory.getServiceName()).andReturn(null).atLeastOnce();
     EasyMock.expect(hostHistory.getComponentName()).andReturn(null).atLeastOnce();
+    EasyMock.expect(hostHistory.getAlertState()).andReturn(AlertState.OK).atLeastOnce();
 
     EasyMock.expect(serviceHistory.getHostName()).andReturn(null).atLeastOnce();
     EasyMock.expect(serviceHistory.getServiceName()).andReturn(SERVICE).atLeastOnce();
     EasyMock.expect(serviceHistory.getComponentName()).andReturn(null).atLeastOnce();
+    EasyMock.expect(serviceHistory.getAlertState()).andReturn(AlertState.OK).atLeastOnce();
 
     if (testType.equals("SCH")) {
       EasyMock.expect(componentHistory.getHostName()).andReturn(HOSTNAME).atLeastOnce();
@@ -199,6 +202,7 @@
       EasyMock.expect(componentHistory.getServiceName()).andReturn(null).atLeastOnce();
       EasyMock.expect(componentHistory.getComponentName()).andReturn(COMPONENT).atLeastOnce();
     }
+    EasyMock.expect(componentHistory.getAlertState()).andReturn(AlertState.OK).atLeastOnce();
 
     List<AlertCurrentEntity> currentAlerts = new ArrayList<>();
     currentAlerts.add(hostAlert);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
index dc9ce5e..fef9276 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
@@ -71,6 +71,8 @@
   private final String stackId = "HDP-2.2.0";
   private final String yetAnotherStackId = "HDP-2.1.1";
 
+  private final String CURRENT_VERSION = "2.2.0-2086";
+
   private Injector injector;
 
   @Inject
@@ -122,9 +124,6 @@
    * @throws AmbariException
    */
   private void createClusterAndHosts(String INSTALLED_VERSION, StackId stackId) throws AmbariException {
-    // Configuring 3-node cluster with 2 repo versions
-    String CURRENT_VERSION = "2.2.0-2086";
-
     Host h1 = clusters.getHost("h1");
     h1.setState(HostState.HEALTHY);
 
@@ -133,6 +132,10 @@
     addHost("h3");
     clusters.mapHostToCluster("h3", "c1");
 
+    // create the new repo version
+    RepositoryVersionEntity repositoryVersionEntity = helper.getOrCreateRepositoryVersion(stackId,
+        INSTALLED_VERSION);
+
     c1.createClusterVersion(stackId, INSTALLED_VERSION, "admin", RepositoryVersionState.INSTALLING);
     c1.setCurrentStackVersion(stackId);
     c1.recalculateAllClusterVersionStates();
@@ -151,13 +154,14 @@
     zkTopology.put("ZOOKEEPER_SERVER", new ArrayList<>(zkServerHosts));
     addService(c1, hostList, zkTopology, "ZOOKEEPER");
 
-    // Register and install new version
-    RepositoryVersionEntity repositoryVersionEntity = helper.getOrCreateRepositoryVersion(stackId,
-        INSTALLED_VERSION);
+    // install new version
     helper.createHostVersion("h1", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+    helper.createHostVersion("h2", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+    helper.createHostVersion("h3", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+
     c1.recalculateAllClusterVersionStates();
-    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION,
-        RepositoryVersionState.INSTALLED);
+
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.INSTALLED);
     assertRepoVersionState(stackId.getStackId(), CURRENT_VERSION, RepositoryVersionState.CURRENT);
 
     // Add new host and verify that it has all host versions present
@@ -210,9 +214,15 @@
     StackId stackId = new StackId(this.stackId);
     StackId yaStackId = new StackId(yetAnotherStackId);
 
+    // get new hosts installed with the first repo
     createClusterAndHosts(INSTALLED_VERSION, stackId);
+
+    // register the new repo
     addRepoVersion(INSTALLED_VERSION_2, yaStackId);
 
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.INSTALLED);
+    assertRepoVersionState(yaStackId.getStackId(), INSTALLED_VERSION_2, RepositoryVersionState.INSTALLED);
+    assertRepoVersionState(yaStackId.getStackId(), CURRENT_VERSION, RepositoryVersionState.CURRENT);
 
     //Add HDFS service
     List<String> hostList = new ArrayList<>();
@@ -233,10 +243,6 @@
 
     List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
 
-    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION,
-        RepositoryVersionState.INSTALLED);
-    assertRepoVersionState(yaStackId.getStackId(), INSTALLED_VERSION_2,
-        RepositoryVersionState.INSTALLED);
     for (HostVersionEntity hostVersionEntity : hostVersions) {
       if (hostVersionEntity.getRepositoryVersion().getVersion().equals(INSTALLED_VERSION) ||
               hostVersionEntity.getRepositoryVersion().getVersion().equals(INSTALLED_VERSION_2)) {
@@ -247,6 +253,11 @@
         }
       }
     }
+
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.OUT_OF_SYNC);
+    assertRepoVersionState(yaStackId.getStackId(), INSTALLED_VERSION_2, RepositoryVersionState.OUT_OF_SYNC);
+    assertRepoVersionState(yaStackId.getStackId(), CURRENT_VERSION, RepositoryVersionState.CURRENT);
   }
 
 
@@ -306,6 +317,9 @@
     createClusterAndHosts(INSTALLED_VERSION, stackId);
     addRepoVersion(INSTALLED_VERSION_2, yaStackId);
 
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.INSTALLED);
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION_2, RepositoryVersionState.INSTALLED);
+
     //Add ZOOKEEPER_CLIENT component
     List<String> hostList = new ArrayList<>();
     hostList.add("h1");
@@ -319,8 +333,8 @@
     changedHosts.add("h2");
     changedHosts.add("h3");
 
-    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION,
-        RepositoryVersionState.INSTALLED);
+    assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.OUT_OF_SYNC);
+
     List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
 
     for (HostVersionEntity hostVersionEntity : hostVersions) {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
index e8eb3e8..56dd1e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/hooks/users/UserHookServiceTest.java
@@ -217,8 +217,8 @@
     // TBD refine expectations to validate the logic / eg capture arguments
     stageMock.addServerActionCommand(EasyMock.anyString(), EasyMock.anyString(), EasyMock.anyObject(Role.class), EasyMock.anyObject(RoleCommand.class), EasyMock.anyString(), EasyMock.anyObject(ServiceComponentHostServerActionEvent.class),
         EasyMock.<Map<String, String>>anyObject(), EasyMock.anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject(), EasyMock.anyInt(), EasyMock.anyBoolean(), EasyMock.anyBoolean());
-    EasyMock.expect(requestFactoryMock.createNewFromStages(Arrays.asList(stageMock))).andReturn(null);
-    EasyMock.expect(stageFactoryMock.createNew(1, "/var/lib/ambari-server/tmp:1", "test-cluster", 1, "Post user creation hook for [ 1 ] users", "{}", "{}", "{}")).andReturn(stageMock);
+    EasyMock.expect(requestFactoryMock.createNewFromStages(Arrays.asList(stageMock), "{}")).andReturn(null);
+    EasyMock.expect(stageFactoryMock.createNew(1, "/var/lib/ambari-server/tmp:1", "test-cluster", 1, "Post user creation hook for [ 1 ] users", "{}", "{}")).andReturn(stageMock);
 
 
     replayAll();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
index 7378b8c..e893503f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metadata/AgentAlertDefinitionsTest.java
@@ -77,7 +77,7 @@
   public void testLoadingServertAlerts() {
     AmbariServiceAlertDefinitions ambariServiceAlertDefinitions = m_injector.getInstance(AmbariServiceAlertDefinitions.class);
     List<AlertDefinition> definitions = ambariServiceAlertDefinitions.getServerDefinitions();
-    Assert.assertEquals(3, definitions.size());
+    Assert.assertEquals(4, definitions.size());
 
     for (AlertDefinition definition : definitions) {
       Assert.assertEquals(Components.AMBARI_SERVER.name(),
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
index 4a60892..3ee8ebc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
@@ -74,6 +74,16 @@
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return true;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 61888;
+  }
+
+  @Override
   public void init(MetricsConfiguration configuration) {
 
   }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 8e50b5f..574ffa4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -75,6 +75,7 @@
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
+import org.apache.ambari.server.security.authorization.UserName;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -227,7 +228,7 @@
     PasswordEncoder encoder = injector.getInstance(PasswordEncoder.class);
 
     UserEntity admin = new UserEntity();
-    admin.setUserName("administrator");
+    admin.setUserName(UserName.fromString("administrator"));
     admin.setUserPassword(encoder.encode("admin"));
     admin.setPrincipal(principalEntity);
 
@@ -242,7 +243,7 @@
     getEntityManager().persist(principalEntity);
 
     UserEntity userWithoutRoles = new UserEntity();
-    userWithoutRoles.setUserName("userWithoutRoles");
+    userWithoutRoles.setUserName(UserName.fromString("userWithoutRoles"));
     userWithoutRoles.setUserPassword(encoder.encode("test"));
     userWithoutRoles.setPrincipal(principalEntity);
     userDAO.create(userWithoutRoles);
@@ -431,10 +432,6 @@
     serviceComponentHost.setDesiredState(State.INSTALLED);
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   public void installHdfsService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
@@ -469,10 +466,6 @@
     sch.setStackVersion(new StackId("HDP-2.0.6"));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install along with
-   * creating a single {@link Host} and {@link ServiceComponentHost}.
-   */
   public void installYarnService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
index d5fa793..f6f3269 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAOTest.java
@@ -175,8 +175,6 @@
    * @param hostEntity
    * @param requestEntity
    * @param status
-   * @param skipStage
-   * @param supportsAutoSkipOnFailure
    * @return
    */
   private void createStage(long startStageId, int count, HostEntity hostEntity,
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UserDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UserDAOTest.java
index bb0b0cf..800bef1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UserDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UserDAOTest.java
@@ -34,6 +34,7 @@
 
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.entities.UserEntity;
+import org.apache.ambari.server.security.authorization.UserName;
 import org.apache.ambari.server.security.authorization.UserType;
 import org.junit.Test;
 
@@ -114,7 +115,7 @@
 
   private static final UserEntity user(String name, UserType type) {
     UserEntity userEntity = new UserEntity();
-    userEntity.setUserName(name);
+    userEntity.setUserName(UserName.fromString(name));
     userEntity.setUserType(type);
     return userEntity;
   }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntityTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntityTest.java
new file mode 100644
index 0000000..e37b582
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntityTest.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.orm.entities;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+/**
+ * HostRoleCommandEntity unit tests.
+ */
+public class HostRoleCommandEntityTest {
+  @Test
+  public void testSetCustomCommandName() {
+    HostRoleCommandEntity entity = new HostRoleCommandEntity();
+    entity.setCustomCommandName("foo");
+    assertEquals("foo", entity.getCustomCommandName());
+  }
+
+  @Test
+  public void testSetCommandDetail() {
+    HostRoleCommandEntity entity = new HostRoleCommandEntity();
+    entity.setCommandDetail("foo");
+    assertEquals("foo", entity.getCommandDetail());
+  }
+
+  @Test
+  public void testSetOpsDisplayName() {
+    HostRoleCommandEntity entity = new HostRoleCommandEntity();
+    entity.setOpsDisplayName("foo");
+    assertEquals("foo", entity.getOpsDisplayName());
+  }
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/SecurityHelperImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/SecurityHelperImplTest.java
index d69d49a..43d2ed2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/SecurityHelperImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/SecurityHelperImplTest.java
@@ -24,6 +24,7 @@
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.authorization.AmbariUserAuthentication;
 import org.apache.ambari.server.security.authorization.User;
+import org.apache.ambari.server.security.authorization.UserName;
 import org.junit.Assert;
 import org.junit.Test;
 import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
@@ -43,7 +44,7 @@
     SecurityContext ctx = SecurityContextHolder.getContext();
     UserEntity userEntity = new UserEntity();
     userEntity.setPrincipal(new PrincipalEntity());
-    userEntity.setUserName("userName");
+    userEntity.setUserName(UserName.fromString("userName"));
     userEntity.setUserId(1);
     User user = new User(userEntity);
     Authentication auth = new AmbariUserAuthentication(null, user, null);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java
index 6b98a5b..891ab38 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariAuthorizationProviderDisableUserTest.java
@@ -90,7 +90,7 @@
     UserEntity activeUser = new UserEntity();
     activeUser.setUserId(1);
     activeUser.setActive(isActive);
-    activeUser.setUserName(login);
+    activeUser.setUserName(UserName.fromString(login));
     activeUser.setUserPassword(encoder.encode("pwd"));
     activeUser.setPrincipal(principalEntity);
     Mockito.when(userDAO.findLocalUserByName(login)).thenReturn(activeUser);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserProviderTest.java
index 9ff381f..f43ca90 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserProviderTest.java
@@ -155,7 +155,7 @@
     PrincipalEntity principalEntity = new PrincipalEntity();
     UserEntity userEntity = new UserEntity();
     userEntity.setUserId(1);
-    userEntity.setUserName(TEST_USER_NAME);
+    userEntity.setUserName(UserName.fromString(TEST_USER_NAME));
     userEntity.setUserPassword(passwordEncoder.encode(TEST_USER_PASS));
     userEntity.setUserType(UserType.LOCAL);
     userEntity.setPrincipal(principalEntity);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
index 5b3acd0..c623000 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
@@ -17,17 +17,21 @@
  */
 package org.apache.ambari.server.security.authorization;
 
-import static junit.framework.Assert.assertEquals;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.audit.AuditLoggerModule;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.dao.UserDAO;
+import org.apache.ambari.server.orm.entities.PrincipalEntity;
+import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.ClientSecurityType;
 import org.easymock.EasyMock;
 import org.junit.After;
@@ -35,9 +39,9 @@
 import org.junit.Test;
 import org.jvnet.libpam.PAM;
 import org.jvnet.libpam.UnixUser;
-import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
+import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.inject.Guice;
 import com.google.inject.Inject;
@@ -50,10 +54,16 @@
   private static Injector injector;
 
   @Inject
+  PasswordEncoder passwordEncoder;
+  @Inject
   private AmbariPamAuthenticationProvider authenticationProvider;
   @Inject
   Configuration configuration;
 
+  private static final String TEST_USER_NAME = "userName";
+  private static final String TEST_USER_PASS = "userPass";
+  private static final String TEST_USER_INCORRECT_PASS = "userIncorrectPass";
+
   @Before
   public void setUp() {
     injector = Guice.createInjector(new AuditLoggerModule(), new AuthorizationTestModule());
@@ -70,7 +80,10 @@
 
   @Test(expected = AuthenticationException.class)
   public void testBadCredential() throws Exception {
-    Authentication authentication = new UsernamePasswordAuthenticationToken("notFound", "wrong");
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
+    Authentication authentication = new AmbariUserAuthentication("wrong", user, userAuthorities);
     authenticationProvider.authenticate(authentication);
   }
 
@@ -78,20 +91,42 @@
   public void testAuthenticate() throws Exception {
     PAM pam = createNiceMock(PAM.class);
     UnixUser unixUser = createNiceMock(UnixUser.class);
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    UserDAO userDAO = createNiceMock(UserDAO.class);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
     expect(pam.authenticate(EasyMock.anyObject(String.class), EasyMock.anyObject(String.class))).andReturn(unixUser).atLeastOnce();
     expect(unixUser.getGroups()).andReturn(new HashSet<>(Arrays.asList("group"))).atLeastOnce();
     EasyMock.replay(unixUser);
     EasyMock.replay(pam);
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
+    Authentication authentication = new AmbariUserAuthentication("userPass", user, userAuthorities);
     Authentication result = authenticationProvider.authenticateViaPam(pam,authentication);
-    assertEquals("allowedUser", result.getName());
+    Assert.assertNotNull(result);
+    Assert.assertTrue(result.isAuthenticated());
+    Assert.assertTrue(result instanceof AmbariUserAuthentication);
   }
 
   @Test
   public void testDisabled() throws Exception {
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
     configuration.setClientSecurityType(ClientSecurityType.LOCAL);
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
+    Authentication authentication = new AmbariUserAuthentication("userPass", user, userAuthorities);
     Authentication auth = authenticationProvider.authenticate(authentication);
     Assert.assertTrue(auth == null);
   }
+
+  private UserEntity combineUserEntity() {
+    PrincipalEntity principalEntity = new PrincipalEntity();
+    UserEntity userEntity = new UserEntity();
+    userEntity.setUserId(1);
+    userEntity.setUserName(UserName.fromString(TEST_USER_NAME));
+    userEntity.setUserPassword(passwordEncoder.encode(TEST_USER_PASS));
+    userEntity.setUserType(UserType.PAM);
+    userEntity.setPrincipal(principalEntity);
+    return userEntity;
+  }
+
 }
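The rewritten PAM tests share one setup pattern: build a PAM-typed UserEntity, wrap it in a User plus mock authorities, and authenticate with an AmbariUserAuthentication token instead of the former UsernamePasswordAuthenticationToken. Condensed:

    // Shared shape of testBadCredential/testAuthenticate/testDisabled above.
    UserEntity entity = combineUserEntity();        // PAM-typed user, encoded password
    User user = new User(entity);
    Collection<AmbariGrantedAuthority> authorities =
        Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
    Authentication token = new AmbariUserAuthentication(TEST_USER_PASS, user, authorities);
    Authentication result = authenticationProvider.authenticate(token);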
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariUserAuthenticationFilterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariUserAuthenticationFilterTest.java
index 6541a59..629be46 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariUserAuthenticationFilterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariUserAuthenticationFilterTest.java
@@ -208,7 +208,7 @@
     PrincipalEntity principalEntity = new PrincipalEntity();
     UserEntity userEntity = new UserEntity();
     userEntity.setUserId(TEST_USER_ID);
-    userEntity.setUserName(TEST_USER_NAME);
+    userEntity.setUserName(UserName.fromString(TEST_USER_NAME));
     userEntity.setUserType(UserType.LOCAL);
     userEntity.setPrincipal(principalEntity);
     User user = new User(userEntity);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UserNameTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UserNameTest.java
new file mode 100644
index 0000000..578acdb
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UserNameTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.security.authorization;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(value = Parameterized.class)
+public class UserNameTest {
+  private final String name;
+  private final boolean valid;
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> userNames() {
+    return Arrays.asList(new Object[][] {
+      {"", false},
+      {null, false},
+      {"invalid<", false},
+      {">invalid", false},
+      {"inv&lid", false},
+      {"i\\nvalid", false},
+      {"inva`lid", false},
+      {"inval|d", false},
+      {"user01", true},
+      {"user_name", true},
+    });
+  }
+
+  public UserNameTest(String name, boolean valid) {
+    this.name = name;
+    this.valid = valid;
+  }
+
+  @Test
+  public void testRejectsForbiddenCharacters() throws Exception {
+    try {
+      assertEquals(name, UserName.fromString(name).toString());
+      if (!valid) {
+        fail("Expected user " + name + " to be invalid.");
+      }
+    } catch (IllegalArgumentException e) {
+      if (valid) {
+        fail("Expected user " + name + " to be valid. But was: " + e.getMessage());
+      }
+    }
+  }
+}
\ No newline at end of file
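The parameter matrix pins down the UserName.fromString contract: null or empty names, and names containing any of < > & | \ `, are rejected with IllegalArgumentException; everything else round-trips through toString(). A minimal implementation sketch consistent with that matrix (the real UserName class may validate differently):

    public final class UserName {
      // Characters the test matrix above marks as forbidden.
      private static final char[] FORBIDDEN = {'<', '>', '&', '|', '\\', '`'};
      private final String name;

      private UserName(String name) {
        this.name = name;
      }

      public static UserName fromString(String name) {
        if (name == null || name.isEmpty()) {
          throw new IllegalArgumentException("Username must not be empty");
        }
        for (char each : FORBIDDEN) {
          if (name.indexOf(each) != -1) {
            throw new IllegalArgumentException("Invalid character in username: " + each);
          }
        }
        return new UserName(name);
      }

      @Override
      public String toString() {
        return name;
      }
    }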
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UsersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UsersTest.java
index f426c85..ac91c90 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UsersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/UsersTest.java
@@ -161,7 +161,7 @@
   @Test(expected = AmbariException.class)
   public void testCreateUser_Duplicate() throws Exception {
     UserEntity existing = new UserEntity();
-    existing.setUserName(SERVICEOP_USER_NAME);
+    existing.setUserName(UserName.fromString(SERVICEOP_USER_NAME));
     existing.setUserType(UserType.LDAP);
     existing.setUserId(1);
     existing.setMemberEntities(Collections.<MemberEntity>emptySet());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
index e5e36f3..d4213a6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
@@ -59,6 +59,7 @@
 import org.apache.ambari.server.security.authorization.GroupType;
 import org.apache.ambari.server.security.authorization.LdapServerProperties;
 import org.apache.ambari.server.security.authorization.User;
+import org.apache.ambari.server.security.authorization.UserName;
 import org.apache.ambari.server.security.authorization.Users;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
@@ -1970,7 +1971,7 @@
   private User createUser(String name, boolean ldapUser, GroupEntity group) {
     final UserEntity userEntity = new UserEntity();
     userEntity.setUserId(userIdCounter++);
-    userEntity.setUserName(name);
+    userEntity.setUserName(UserName.fromString(name));
     userEntity.setCreateTime(new Date());
     userEntity.setLdapUser(ldapUser);
     userEntity.setActive(true);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
index 2feef41..4d1b48b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/ServerActionExecutorTest.java
@@ -127,7 +127,7 @@
     final Request request = createMockRequest();
     stageFactory = createNiceMock(StageFactory.class);
 
-    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context", CLUSTER_HOST_INFO,
+    final Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 978, "context",
         "{\"host_param\":\"param_value\"}", "{\"stage_param\":\"param_value\"}");
 
     stage.addServerActionCommand(ManualStageAction.class.getName(),
@@ -318,14 +318,13 @@
                                                 final int timeout) {
     stageFactory = createNiceMock(StageFactory.class);
     expect(stageFactory.createNew(anyLong(), anyObject(String.class), anyObject(String.class),
-        anyLong(), anyObject(String.class), anyObject(String.class),
-        anyObject(String.class), anyObject(String.class))).
+        anyLong(), anyObject(String.class), anyObject(String.class), anyObject(String.class))).
         andAnswer(new IAnswer<Stage>() {
 
           @Override
           public Stage answer() throws Throwable {
             Stage stage = stageFactory.createNew(requestId, "/tmp", "cluster1",
-                1L, requestContext, CLUSTER_HOST_INFO, "{}", "{}");
+                1L, requestContext, "{}", "{}");
 
             stage.setStageId(stageId);
             stage.addServerActionCommand(MockServerAction.class.getName(), null,
@@ -338,7 +337,7 @@
           }
         });
 
-    Stage stage = stageFactory.createNew(requestId, "", "", 1L, "", "", "", "");
+    Stage stage = stageFactory.createNew(requestId, "", "", 1L, "", "", "");
     return stage;
   }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index e02a1be..73ab5e6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -292,7 +292,6 @@
     assertNotNull(report);
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
     assertEquals(0, report.getExitCode());
-
   }
 
   @Test
@@ -372,6 +371,74 @@
     assertEquals(-1, report.getExitCode());
   }
 
+  @Test
+  public void testMatchingPartialVersions() throws Exception {
+    StackId sourceStack = HDP_21_STACK;
+    StackId targetStack = HDP_21_STACK;
+    String sourceRepo = HDP_2_1_1_0;
+    String targetRepo = HDP_2_1_1_1;
+
+    makeUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+
+    Host host = clusters.getHost("h1");
+    Assert.assertNotNull(host);
+    host.setOsInfo("redhat6");
+
+    Cluster cluster = clusters.getCluster("c1");
+    clusters.mapHostToCluster("h1", "c1");
+
+    Service service = installService(cluster, "HDFS");
+    ServiceComponent sc = addServiceComponent(cluster, service, "NAMENODE");
+    sc.setDesiredVersion(HDP_2_1_1_0);
+
+    sc = addServiceComponent(cluster, service, "DATANODE");
+    sc.setDesiredVersion(HDP_2_1_1_0);
+
+    ServiceComponentHost sch = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
+    sch.setVersion(HDP_2_1_1_0);
+    sch = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
+    sch.setVersion(HDP_2_1_1_0);
+
+    service = installService(cluster, "ZOOKEEPER");
+    sc = addServiceComponent(cluster, service, "ZOOKEEPER_SERVER");
+    sc.setDesiredVersion(HDP_2_1_1_1);
+
+    sch = createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1");
+    sch.setVersion(HDP_2_1_1_1);
+
+    // Verify the repo before calling Finalize
+    AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+    RepositoryInfo repo = metaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
+    assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
+
+    // Finalize the upgrade
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
+    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
+    commandParams.put(FinalizeUpgradeAction.SUPPORTED_SERVICES_KEY, "ZOOKEEPER");
+    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, "HDP-2.1.1");
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
+
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+    ComponentVersionCheckAction action = m_injector.getInstance(ComponentVersionCheckAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
+
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+    assertEquals(0, report.getExitCode());
+  }
+
   private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String svc,
                                                              String svcComponent, String hostName) throws AmbariException {
     Assert.assertNotNull(cluster.getConfigGroups());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
index f6b7dfa..142efa3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
@@ -27,6 +27,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.quicklinks.Check;
@@ -37,11 +38,20 @@
 import org.apache.ambari.server.state.quicklinks.QuickLinksConfiguration;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
 public class QuickLinksConfigurationModuleTest {
 
   @Test
+  public void testAddErrors() {
+    Set<String> errors = ImmutableSet.of("one error", "two errors");
+    QuickLinksConfigurationModule module = new QuickLinksConfigurationModule((File) null);
+    module.addErrors(errors);
+    assertEquals(errors, ImmutableSet.copyOf(module.getErrors()));
+  }
+
+  @Test
   public void testResolveInherit() throws Exception{
     QuickLinks[] results = resolveQuickLinks("parent_quicklinks.json", "child_quicklinks_to_inherit.json");
     QuickLinks parentQuickLinks = results[0];
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
index 4e20c2b..1fd0921 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
@@ -35,6 +35,7 @@
 import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -123,10 +124,11 @@
     osFamily = new OsFamily(config);
 
     replay(metaInfoDao, actionMetadata);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(stackRoot), new File(
         commonServicesRoot), new File(extensionRoot), osFamily, true, metaInfoDao,
-        actionMetadata, stackDao, extensionDao, linkDao);
+        actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     EasyMock.verify( config, stackDao );
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 9900eb5..33717cb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -30,6 +30,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -59,42 +60,57 @@
     ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     OsFamily osFamily = createNiceMock(OsFamily.class);
-    StackEntity stackEntity = createNiceMock(StackEntity.class);
-    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
-    ExtensionLinkEntity linkEntity = createNiceMock(ExtensionLinkEntity.class);
+    StackEntity stack1 = new StackEntity();
+    stack1.setStackName("HDP");
+    stack1.setStackVersion("0.1");
+    StackEntity stack2 = new StackEntity();
+    stack2.setStackName("HDP");
+    stack2.setStackVersion("0.2");
+    StackEntity stack3 = new StackEntity();
+    stack3.setStackName("HDP");
+    stack3.setStackVersion("0.3");
+    ExtensionEntity extension1 = new ExtensionEntity();
+    extension1.setExtensionName("EXT");
+    extension1.setExtensionVersion("0.1");
+    ExtensionEntity extension2 = new ExtensionEntity();
+    extension2.setExtensionName("EXT");
+    extension2.setExtensionVersion("0.2");
+    ExtensionEntity extension3 = new ExtensionEntity();
+    extension3.setExtensionName("EXT");
+    extension3.setExtensionVersion("0.3");
     List<ExtensionLinkEntity> list = new ArrayList<>();
-    list.add(linkEntity);
 
-    expect(
-        stackDao.find(EasyMock.anyObject(String.class),
-            EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
+    expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
+    expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
+    expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
 
-    expect(
-        extensionDao.find(EasyMock.anyObject(String.class),
-            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
-
-    expect(
-        linkDao.findByStack(EasyMock.anyObject(String.class),
+    expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
-    expect(
-        linkEntity.getExtension()).andReturn(extensionEntity).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
 
-    expect(
-        extensionEntity.getExtensionName()).andReturn("EXT").atLeastOnce();
-
-    expect(
-        extensionEntity.getExtensionVersion()).andReturn("0.2").atLeastOnce();
-
-    replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao, extensionEntity, linkEntity);
+    replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao);
 
     String stacks = ClassLoader.getSystemClassLoader().getResource("stacks_with_extensions").getPath();
     String common = ClassLoader.getSystemClassLoader().getResource("common-services").getPath();
     String extensions = ClassLoader.getSystemClassLoader().getResource("extensions").getPath();
 
-    StackManager stackManager = new StackManager(new File(stacks),
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
+    StackManager stackManager = null;
+    try {
+      stackManager = new StackManager(new File(stacks),
         new File(common), new File(extensions), osFamily, false,
-        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
+    }
+    catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    assertNotNull("Failed to create Stack Manager", stackManager);
 
     ExtensionInfo extension = stackManager.getExtension("EXT", "0.1");
     assertNull("EXT 0.1's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
@@ -116,6 +132,7 @@
     assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
     assertEquals("EXT 0.2's parent: " + extension.getParentExtensionVersion(), "0.1", extension.getParentExtensionVersion());
     assertNotNull(extension.getService("OOZIE2"));
+    assertTrue("Extension is not set to auto link", extension.isAutoLink());
     oozie = extension.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
@@ -137,7 +154,13 @@
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.2");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.3");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }
 
 }
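The reworked assertions encode the auto-link behavior this commit exercises: EXT is flagged auto-link, so HDP 0.2, previously asserted to carry EXT 0.2, now reports EXT 0.3, and the new HDP 0.3 stack does as well. Condensed restatement of the final checks:

    StackInfo stack = stackManager.getStack("HDP", "0.3");
    ExtensionInfo ext = stack.getExtensions().iterator().next();
    assertEquals("EXT", ext.getName());
    assertEquals("0.3", ext.getVersion());   // auto-linked to the newest EXT version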
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
index ca24cd9..6df46c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
@@ -32,6 +32,7 @@
 import java.util.List;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -69,12 +70,13 @@
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     replay(actionMetadata, stackDao, extensionDao, linkDao, metaInfoDao, osFamily);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     try {
       String stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
 
       StackManager stackManager = new StackManager(new File(stacksCycle1), null, null, osFamily, false,
-          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -86,7 +88,7 @@
           "stacks_with_cycle2").getPath();
 
       StackManager stackManager = new StackManager(new File(stacksCycle2),
-          null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -124,10 +126,11 @@
     replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
     String singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(singleStack.replace(
         StackManager.PATH_DELIMITER, File.separator)), null, null, osFamily, false, metaInfoDao,
-        actionMetadata, stackDao, extensionDao, linkDao);
+        actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     Collection<StackInfo> stacks = stackManager.getStacks();
     assertEquals(1, stacks.size());
@@ -161,11 +164,13 @@
 
     replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
     try {
       String upgradeCycle = ClassLoader.getSystemClassLoader().getResource("stacks_with_upgrade_cycle").getPath();
 
       StackManager stackManager = new StackManager(new File(upgradeCycle), null, null, osFamily, false,
-          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic service upgrade xml");
     } catch (AmbariException e) {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
index 0fe1573..4a26020 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
@@ -27,6 +27,7 @@
 import javax.annotation.Nullable;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -134,8 +135,8 @@
       File commonServicesRoot, @Assisted("extensionRoot") @Nullable File extensionRoot,
                           @Assisted OsFamily osFamily, @Assisted boolean validate, MetainfoDAO metaInfoDAO,
                           ActionMetadata actionMetadata, StackDAO stackDao, ExtensionDAO extensionDao,
-                          ExtensionLinkDAO linkDao) throws AmbariException {
-    super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao);
+                          ExtensionLinkDAO linkDao, AmbariManagementHelper helper) throws AmbariException {
+    super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao, helper);
     currentStackRoot = stackRoot;
     currentCommonServicesRoot = commonServicesRoot;
     currentExtensionRoot = extensionRoot;
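Every StackManager built in these tests now takes an AmbariManagementHelper as its final constructor argument, constructed from the same three DAOs the manager already receives; the recurring pattern is:

    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
    StackManager stackManager = new StackManager(stackRoot, commonServicesRoot, extensionRoot,
        osFamily, validate, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);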
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 507c560..5149026 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -44,6 +44,7 @@
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -58,6 +59,7 @@
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.MetricDefinition;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -118,9 +120,10 @@
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(stackRoot), null, null, osFamily, false,
-        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     verify(config, metaInfoDao, stackDao, actionMetadata);
 
@@ -728,7 +731,7 @@
   }
 
   /**
-   * Tests that {@link UpgradePack} instances are correctly initialized
+   * Tests that {@link UpgradePack} and {@link ConfigUpgradePack} instances are correctly initialized
    * post-unmarshalling.
    *
    * @throws Exception
@@ -745,6 +748,9 @@
       // reference equality (make sure it's the same list)
       assertTrue(upgradePack.getTasks() == upgradePack.getTasks());
     }
+    ConfigUpgradePack configUpgradePack = stack.getConfigUpgradePack();
+    assertNotNull(configUpgradePack);
+    assertNotNull(configUpgradePack.services);
   }
 
   @Test
@@ -790,9 +796,10 @@
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions,
-            osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+            osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     for (StackInfo stackInfo : stackManager.getStacks()) {
       for (ServiceInfo serviceInfo : stackInfo.getServices()) {
@@ -855,9 +862,10 @@
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
-        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
     String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
@@ -933,12 +941,12 @@
     ArrayList<String> rangerKmsBlockers = (ArrayList<String>)generalDeps.get(kmsRoleCommand);
 
     assertTrue(kmsRoleCommand + " should be dependent of " + rangerAdminRoleCommand, rangerKmsBlockers.contains(rangerAdminRoleCommand));
+    assertTrue(kmsRoleCommand + " should be dependent of " + nameNodeRoleCommand, rangerKmsBlockers.contains(nameNodeRoleCommand));
 
     // Ranger User Sync
     ArrayList<String> rangerUserSyncBlockers = (ArrayList<String>)generalDeps.get(rangerUserSyncRoleCommand);
 
     assertTrue(rangerUserSyncRoleCommand + " should be dependent of " + rangerAdminRoleCommand, rangerUserSyncBlockers.contains(rangerAdminRoleCommand));
-    assertTrue(rangerUserSyncRoleCommand + " should be dependent of " + kmsRoleCommand, rangerUserSyncBlockers.contains(kmsRoleCommand));
   }
   //todo: component override assertions
 
@@ -984,9 +992,10 @@
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
-        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     String zookeeperServerRoleCommand = Role.ZOOKEEPER_SERVER + "-" + RoleCommand.START;
     String logsearchServerRoleCommand = Role.LOGSEARCH_SERVER + "-" + RoleCommand.START;
@@ -1025,4 +1034,16 @@
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + infraSolrRoleCommand, logsearchLogfeederBlockers.contains(infraSolrRoleCommand));
     assertTrue(logsearchLogfeederRoleCommand + " should be dependent of " + logsearchServerRoleCommand, logsearchLogfeederBlockers.contains(logsearchServerRoleCommand));
   }
+
+  @Test
+  public void testVersionDefinitionStackRepoUpdateLinkExists(){
+    // HDP 2.1.1 defines a "latest" repo update URI in its repoinfo.xml; HDP 2.0.8 does not
+    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
+    String latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri != null);
+
+    stack = stackManager.getStack("HDP", "2.0.8");
+    latestUri = stack.getRepositoryXml().getLatestURI();
+    assertTrue(latestUri == null);
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java
index ec89a8c..e8b1b40 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/ThemeModuleTest.java
@@ -22,12 +22,14 @@
 import static org.junit.Assert.assertNotNull;
 
 import java.io.File;
+import java.util.Set;
 
 import org.apache.ambari.server.state.theme.Theme;
 import org.junit.Test;
 
-public class ThemeModuleTest {
+import com.google.common.collect.ImmutableSet;
 
+public class ThemeModuleTest {
 
   @Test
   public void testResolve() throws Exception {
@@ -49,7 +51,13 @@
 
     assertEquals(10, parentTheme.getThemeConfiguration().getWidgets().size());
     assertEquals(12, childTheme.getThemeConfiguration().getWidgets().size());
+  }
 
-
+  @Test
+  public void testAddErrors() {
+    Set<String> errors = ImmutableSet.of("one error", "two errors");
+    ThemeModule module = new ThemeModule((File) null);
+    module.addErrors(errors);
+    assertEquals(errors, ImmutableSet.copyOf(module.getErrors()));
   }
 }
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
index 7063147..3e592b2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
@@ -126,7 +126,7 @@
     RoleGraph rg = roleGraphFactory.createNew(rco);
     long now = System.currentTimeMillis();
     Stage stage = stageFactory.createNew(1, "/tmp", "cluster1", 1L, "execution command wrapper test",
-      "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+      "commandParamsStage", "hostParamsStage");
     stage.setStageId(1);
     stage.addServerActionCommand("RESTART", null, Role.HIVE_METASTORE,
       RoleCommand.CUSTOM_COMMAND, "cluster1",
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 8e8bed3..2d589ed 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -33,6 +33,7 @@
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
+import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -640,6 +641,77 @@
     assertEquals(0, list.size());
   }
 
+
+  public void testUpdateStates() throws Exception {
+    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(
+        ServiceComponentDesiredStateDAO.class);
+
+    String componentName = "NAMENODE";
+
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
+    component.setDesiredStackVersion(new StackId("HDP-2.2.0"));
+    service.addServiceComponent(component);
+
+    ServiceComponent sc = service.getServiceComponent(componentName);
+    Assert.assertNotNull(sc);
+
+    ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+
+    helper.getOrCreateRepositoryVersion(component.getDesiredStackVersion(), "2.2.0.1");
+    helper.getOrCreateRepositoryVersion(component.getDesiredStackVersion(), "2.2.0.2");
+
+    addHostToCluster("h1", clusterName);
+    addHostToCluster("h2", clusterName);
+
+    sc.setDesiredState(State.INSTALLED);
+    Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
+
+    ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
+    ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
+
+    // !!! case 1: component desired is UNKNOWN, mix of h-c versions
+    sc.setDesiredVersion(StackVersionListener.UNKNOWN_VERSION);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.2");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 2: component desired is UNKNOWN, all h-c same version
+    sc.setDesiredVersion(StackVersionListener.UNKNOWN_VERSION);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.1");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+    assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState());
+
+    // !!! case 3: component desired is known, any component reports different version
+    sc.setDesiredVersion("2.2.0.1");
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.2");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 4: component desired is known, component reports same as desired, mix of h-c versions
+    sc.setDesiredVersion("2.2.0.1");
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 5: component desired is known, component reports same as desired, all h-c the same
+    sc.setDesiredVersion("2.2.0.1");
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.1");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
+    assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState());
+  }
+
   /**
    * Creates an upgrade entity, asserting it was created correctly.
    *
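The five cases in testUpdateStates walk a small decision table for updateRepositoryState: the component's repository state lands on CURRENT only when the reported version matches the desired version (or desired is UNKNOWN) and every host-component agrees on it; any disagreement yields OUT_OF_SYNC. A sketch of that rule (illustrative; not the actual ServiceComponent code):

    RepositoryVersionState stateFor(String desired, String reported, List<String> hostVersions) {
      boolean hostsAgree = new HashSet<>(hostVersions).size() == 1;
      boolean desiredOk = StackVersionListener.UNKNOWN_VERSION.equals(desired)
          || desired.equals(reported);
      boolean hostsMatchReported = hostsAgree && hostVersions.get(0).equals(reported);
      return desiredOk && hostsMatchReported
          ? RepositoryVersionState.CURRENT
          : RepositoryVersionState.OUT_OF_SYNC;
    }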
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 0dd7f58..3dc34e3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -38,6 +38,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -267,6 +269,7 @@
     ambariMetaInfo.init();
   }
 
+  @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
   @Test
   public void testPartialUpgradeOrchestration() throws Exception {
     Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("foo", "bar");
@@ -1039,12 +1042,9 @@
     UpgradePack upgrade = upgrades.get("upgrade_test_checks");
     assertNotNull(upgrade);
 
-    Cluster c = makeCluster();
     // HBASE and PIG have service checks, but not TEZ.
     Set<String> additionalServices = new HashSet<String>() {{ add("HBASE"); add("PIG"); add("TEZ"); add("AMBARI_METRICS"); }};
-    for(String service : additionalServices) {
-      c.addService(service);
-    }
+    Cluster c = makeCluster(true, additionalServices);
 
     int numServiceChecksExpected = 0;
     Collection<Service> services = c.getServices().values();
@@ -1071,9 +1071,9 @@
 
     List<UpgradeGroupHolder> groups = m_upgradeHelper.createSequence(upgrade, context);
 
-    assertEquals(7, groups.size());
+    assertEquals(8, groups.size());
 
-    UpgradeGroupHolder holder = groups.get(3);
+    UpgradeGroupHolder holder = groups.get(4);
     assertEquals(holder.name, "SERVICE_CHECK_1");
     assertEquals(7, holder.items.size());
     int numServiceChecksActual = 0;
@@ -1100,6 +1100,13 @@
     assertEquals(
         "This is a manual task with a placeholder of placeholder-rendered-properly",
         manualTask.messages.get(0));
+
+    UpgradeGroupHolder clusterGroup = groups.get(3);
+    assertEquals(clusterGroup.name, "HBASE");
+    assertEquals(clusterGroup.title, "Update HBase Configuration");
+    assertEquals(1, clusterGroup.items.size());
+    StageWrapper stage = clusterGroup.items.get(0);
+    assertEquals(stage.getText(), "Update HBase Configuration");
   }
 
   @Test
@@ -1225,6 +1232,14 @@
    * @throws AmbariException
    */
   private Cluster makeCluster(boolean clean) throws AmbariException, AuthorizationException {
+    return makeCluster(clean, new HashSet<String>());
+  }
+
+  /**
+   * Create an HA cluster, installing any additional services requested.
+   * @param additionalServices extra service names to add beyond the defaults
+   * @throws AmbariException
+   */
+  private Cluster makeCluster(boolean clean, Set<String> additionalServices) throws AmbariException, AuthorizationException {
     Clusters clusters = injector.getInstance(Clusters.class);
     ServiceFactory serviceFactory = injector.getInstance(ServiceFactory.class);
 
@@ -1369,6 +1384,15 @@
 
     expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes();
 
+    for(String service : additionalServices) {
+      c.addService(service);
+      if (service.equals("HBASE")) {
+        type = new HostsType();
+        type.hosts.addAll(Arrays.asList("h1", "h2"));
+        expect(m_masterHostResolver.getMasterAndHosts("HBASE", "HBASE_MASTER")).andReturn(type).anyTimes();
+      }
+    }
+
     replay(m_masterHostResolver);
 
     return c;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
index 62fc19e..838cd6b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
@@ -275,7 +275,7 @@
   }
 
   /**
-   * Test {@link AlertDefinitionHash#isHashCached(String)}.
+   * Test {@link AlertDefinitionHash#isHashCached(String,String)}.
    */
   @Test
   public void testIsHashCached() {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
index e50dba5..8c23b69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
@@ -300,9 +300,6 @@
         source.getAlertName()));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = serviceFactory.createNew(cluster, serviceName);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
index 483aac5..890464d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
@@ -173,9 +173,6 @@
         m_listener.getAlertEventReceivedCount(InitialAlertEvent.class));
   }
 
-  /**
-   * Calls {@link Service#persist()} to mock a service install.
-   */
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
     Service service = m_serviceFactory.createNew(m_cluster, serviceName);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
index 1e74658..8d7d8ad 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/AlertDataManagerTest.java
@@ -508,7 +508,7 @@
   /**
    * Test interface collects aggregate alert invocations
    */
-  private static interface TestListener {
-    public void catchIt(AlertReceivedEvent event);
+  private interface TestListener {
+    void catchIt(AlertReceivedEvent event);
   }
 }
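
Aside on the TestListener change above: both dropped modifiers were redundant, since per the Java Language Specification a member interface is implicitly static and interface methods are implicitly public abstract. The two forms below are equivalent; the shorter one is idiomatic:

    class Outer {
      private static interface Verbose { public void catchIt(AlertReceivedEvent event); }

      private interface Idiomatic { void catchIt(AlertReceivedEvent event); }
    }
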
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
index d3c8acf..bba197f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterEffectiveVersionTest.java
@@ -143,6 +143,7 @@
     Cluster clusterSpy = Mockito.spy(m_cluster);
 
     UpgradeEntity upgradeEntity = createNiceMock(UpgradeEntity.class);
+    EasyMock.expect(upgradeEntity.getId()).andReturn(1L).atLeastOnce();
     EasyMock.expect(upgradeEntity.getUpgradeType()).andReturn(UpgradeType.ROLLING).atLeastOnce();
     EasyMock.expect(upgradeEntity.getFromVersion()).andReturn("2.3.0.0-1234").anyTimes();
     EasyMock.expect(upgradeEntity.getToVersion()).andReturn("2.4.0.0-1234").atLeastOnce();
@@ -184,6 +185,7 @@
 
     // from/to are switched on downgrade
     UpgradeEntity upgradeEntity = createNiceMock(UpgradeEntity.class);
+    EasyMock.expect(upgradeEntity.getId()).andReturn(1L).atLeastOnce();
     EasyMock.expect(upgradeEntity.getUpgradeType()).andReturn(UpgradeType.NON_ROLLING).atLeastOnce();
     EasyMock.expect(upgradeEntity.getToVersion()).andReturn("2.3.0.0-1234").atLeastOnce();
     EasyMock.expect(upgradeEntity.getFromVersion()).andReturn("2.4.0.0-1234").anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index aecc031..c426684 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -224,8 +224,12 @@
   }
 
   private void createDefaultCluster(Set<String> hostNames) throws Exception {
-    // TODO, use common function
     StackId stackId = new StackId("HDP", "0.1");
+    createDefaultCluster(hostNames, stackId);
+  }
+
+  private void createDefaultCluster(Set<String> hostNames, StackId stackId) throws Exception {
+    // TODO, use common function
     StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
     org.junit.Assert.assertNotNull(stackEntity);
 
@@ -1638,19 +1642,20 @@
    * Tests that hosts can be correctly transitioned into the "INSTALLING" state.
    * This method also tests that hosts in MM will not be transitioned, as per
    * the contract of
-   * {@link Cluster#transitionHostsToInstalling(ClusterVersionEntity)}.
+   * {@link Cluster#transitionHostsToInstalling(ClusterVersionEntity, RepositoryVersionEntity, org.apache.ambari.server.state.repository.VersionDefinitionXml, boolean)}.
    *
    * @throws Exception
    */
   @Test
-  public void testTransitionHostVersions() throws Exception {
-    createDefaultCluster();
+  public void testTransitionHostsToInstalling() throws Exception {
+    // this will create a cluster with a few hosts and no host components
+    StackId originalStackId = new StackId("HDP", "2.0.5");
+    createDefaultCluster(Sets.newHashSet("h1", "h2"), originalStackId);
 
-    StackId stackId = new StackId("HDP", "0.2");
+    StackId stackId = new StackId("HDP", "2.0.6");
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
 
-    c1.createClusterVersion(stackId, "0.2", "admin",
-        RepositoryVersionState.INSTALLING);
+    c1.createClusterVersion(stackId, "2.0.6", "admin", RepositoryVersionState.INSTALLING);
 
     ClusterVersionEntity entityHDP2 = null;
     for (ClusterVersionEntity entity : c1.getAllClusterVersions()) {
@@ -1658,7 +1663,7 @@
       StackId repoVersionStackId = new StackId(repoVersionStackEntity);
 
       if (repoVersionStackId.getStackName().equals("HDP")
-          && repoVersionStackId.getStackVersion().equals("0.2")) {
+          && repoVersionStackId.getStackVersion().equals("2.0.6")) {
         entityHDP2 = entity;
         break;
       }
@@ -1669,7 +1674,9 @@
     List<HostVersionEntity> hostVersionsH1Before = hostVersionDAO.findByClusterAndHost("c1", "h1");
     assertEquals(1, hostVersionsH1Before.size());
 
-    c1.transitionHosts(entityHDP2, RepositoryVersionState.INSTALLING);
+    // this should move both to NOT_REQUIRED since they have no versionable
+    // components
+    c1.transitionHostsToInstalling(entityHDP2, entityHDP2.getRepositoryVersion(), null, false);
 
     List<HostVersionEntity> hostVersionsH1After = hostVersionDAO.findByClusterAndHost("c1", "h1");
     assertEquals(2, hostVersionsH1After.size());
@@ -1678,8 +1685,8 @@
     for (HostVersionEntity entity : hostVersionsH1After) {
       StackEntity repoVersionStackEntity = entity.getRepositoryVersion().getStack();
       if (repoVersionStackEntity.getStackName().equals("HDP")
-          && repoVersionStackEntity.getStackVersion().equals("0.2")) {
-        assertEquals(RepositoryVersionState.INSTALLING, entity.getState());
+          && repoVersionStackEntity.getStackVersion().equals("2.0.6")) {
+        assertEquals(RepositoryVersionState.NOT_REQUIRED, entity.getState());
         checked = true;
         break;
       }
@@ -1687,8 +1694,29 @@
 
     assertTrue(checked);
 
-    // Test for update of existing host stack version
-    c1.transitionHosts(entityHDP2, RepositoryVersionState.INSTALLING);
+    // add some host components
+    Service hdfs = serviceFactory.createNew(c1, "HDFS");
+    c1.addService(hdfs);
+
+    // Add HDFS components
+    ServiceComponent namenode = serviceComponentFactory.createNew(hdfs, "NAMENODE");
+    ServiceComponent datanode = serviceComponentFactory.createNew(hdfs, "DATANODE");
+    hdfs.addServiceComponent(datanode);
+    hdfs.addServiceComponent(namenode);
+
+    // add to hosts
+    ServiceComponentHost namenodeHost1 = serviceComponentHostFactory.createNew(namenode, "h1");
+    ServiceComponentHost datanodeHost2 = serviceComponentHostFactory.createNew(datanode, "h2");
+
+    assertNotNull(namenodeHost1);
+    assertNotNull(datanodeHost2);
+
+    c1.transitionClusterVersion(stackId, entityHDP2.getRepositoryVersion().getVersion(),
+        RepositoryVersionState.INSTALLING);
+
+    // with hosts now having components which report versions, we should have
+    // two in the INSTALLING state
+    c1.transitionHostsToInstalling(entityHDP2, entityHDP2.getRepositoryVersion(), null, false);
 
     hostVersionsH1After = hostVersionDAO.findByClusterAndHost("c1", "h1");
     assertEquals(2, hostVersionsH1After.size());
@@ -1697,7 +1725,7 @@
     for (HostVersionEntity entity : hostVersionsH1After) {
       StackEntity repoVersionStackEntity = entity.getRepositoryVersion().getStack();
       if (repoVersionStackEntity.getStackName().equals("HDP")
-          && repoVersionStackEntity.getStackVersion().equals("0.2")) {
+          && repoVersionStackEntity.getStackVersion().equals("2.0.6")) {
         assertEquals(RepositoryVersionState.INSTALLING, entity.getState());
         checked = true;
         break;
@@ -1727,7 +1755,7 @@
     hostInMaintenanceMode.setMaintenanceState(c1.getClusterId(), MaintenanceState.ON);
 
     // transition host versions to INSTALLING
-    c1.transitionHosts(entityHDP2, RepositoryVersionState.INSTALLING);
+    c1.transitionHostsToInstalling(entityHDP2, entityHDP2.getRepositoryVersion(), null, false);
 
     List<HostVersionEntity> hostInMaintModeVersions = hostVersionDAO.findByClusterAndHost("c1",
         hostInMaintenanceMode.getHostName());
@@ -1739,7 +1767,7 @@
     for (HostVersionEntity hostVersionEntity : hostInMaintModeVersions) {
       StackEntity repoVersionStackEntity = hostVersionEntity.getRepositoryVersion().getStack();
       if (repoVersionStackEntity.getStackName().equals("HDP")
-          && repoVersionStackEntity.getStackVersion().equals("0.2")) {
+          && repoVersionStackEntity.getStackVersion().equals("2.0.6")) {
         assertEquals(RepositoryVersionState.OUT_OF_SYNC, hostVersionEntity.getState());
       }
     }
@@ -1748,7 +1776,7 @@
     for (HostVersionEntity hostVersionEntity : otherHostVersions) {
       StackEntity repoVersionStackEntity = hostVersionEntity.getRepositoryVersion().getStack();
       if (repoVersionStackEntity.getStackName().equals("HDP")
-          && repoVersionStackEntity.getStackVersion().equals("0.2")) {
+          && repoVersionStackEntity.getStackVersion().equals("2.0.6")) {
       assertEquals(RepositoryVersionState.INSTALLING, hostVersionEntity.getState());
       }
     }
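
The rewritten testTransitionHostsToInstalling above repeats one scan-and-assert loop per expected RepositoryVersionState. If more states get covered, a small helper would remove the duplication; a sketch built only from the test's own fields (assertHostVersionState is a suggested name, not existing code):

    private void assertHostVersionState(String host, String stackVersion,
        RepositoryVersionState expected) {
      for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost("c1", host)) {
        StackEntity repoStack = entity.getRepositoryVersion().getStack();
        if (repoStack.getStackName().equals("HDP")
            && repoStack.getStackVersion().equals(stackVersion)) {
          assertEquals(expected, entity.getState());
          return;
        }
      }
      fail("no HDP-" + stackVersion + " host version found on " + host);
    }

Each loop above then collapses to a call like assertHostVersionState("h1", "2.0.6", RepositoryVersionState.NOT_REQUIRED).
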
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index e13dd70..8f37ad7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -181,10 +181,6 @@
   private static final class ServiceComponentHostDeadlockWriter extends Thread {
     private List<ServiceComponentHost> serviceComponentHosts;
 
-    /**
-     * @param nameNodeSCH
-     *          the nameNodeSCH to set
-     */
     public void setServiceComponentHosts(List<ServiceComponentHost> serviceComponentHosts) {
       this.serviceComponentHosts = serviceComponentHosts;
     }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
index 255145c..5f4e00b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
@@ -128,16 +128,6 @@
     assertTrue(entity.getSecurityDescriptorReference().equals("testRef"));
   }
 
-  @Test(expected = InvalidTopologyException.class)
-  public void testValidateConfigurations__basic_negative() throws Exception {
-    expect(group2.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce();
-    replay(stack, group1, group2);
-
-    Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, configuration, null);
-    blueprint.validateRequiredProperties();
-    verify(stack, group1, group2);
-  }
-
   @Test
   public void testValidateConfigurations__hostGroupConfig() throws Exception {
     Map<String, Map<String, String>> group2Props = new HashMap<>();
@@ -157,7 +147,6 @@
     category2Props.put("prop2", "val");
     group1Components.add("NAMENODE");
     group2Components.add("NAMENODE");
-    expect(stack.getServiceForComponent("NAMENODE")).andReturn("SERVICE2").atLeastOnce();
     Map<String, String> hdfsProps = new HashMap<>();
     properties.put("hdfs-site", hdfsProps);
     hdfsProps.put("foo", "val");
@@ -195,8 +184,6 @@
     group1Components.add("ZKFC");
     group2Components.add("NAMENODE");
     group2Components.add("ZKFC");
-    expect(stack.getServiceForComponent("NAMENODE")).andReturn("SERVICE2").atLeastOnce();
-    expect(stack.getServiceForComponent("ZKFC")).andReturn("SERVICE2").atLeastOnce();
     Map<String, String> hdfsProps = new HashMap<>();
     properties.put("hdfs-site", hdfsProps);
     hdfsProps.put("foo", "val");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index 1440c4d..6c90d18 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -248,6 +248,8 @@
 
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
+
     expect(topology.getConfiguration()).andReturn(blueprintConfig).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
@@ -368,9 +370,12 @@
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(blueprint.getServices()).andReturn(services).anyTimes();
-    expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
-    expect(stack.getServiceForConfigType("admin-properties")).andReturn("RANGER").anyTimes();
-    expect(stack.getServiceForConfigType("yarn-site")).andReturn("YARN").anyTimes();
+
+    expect(blueprint.isValidConfigType("hdfs-site")).andReturn(true).anyTimes();
+    expect(blueprint.isValidConfigType("admin-properties")).andReturn(true).anyTimes();
+    expect(blueprint.isValidConfigType("yarn-site")).andReturn(false).anyTimes();
+    expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
+    expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
     EasyMock.replay(stack, blueprint, topology);
     // WHEN
@@ -409,9 +414,10 @@
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(blueprint.getServices()).andReturn(services).anyTimes();
-    expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
-    expect(stack.getServiceForConfigType("admin-properties")).andReturn("RANGER").anyTimes();
-    expect(stack.getServiceForConfigType("yarn-site")).andReturn("YARN").anyTimes();
+
+    expect(blueprint.isValidConfigType("hdfs-site")).andReturn(true).anyTimes();
+    expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes();
+    expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes();
 
     EasyMock.replay(stack, blueprint, topology);
 
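
The expectation changes in ClusterConfigurationRequestTest above track a responsibility move: tests now stub Blueprint.isValidConfigType(String) instead of resolving a service through Stack.getServiceForConfigType(String). Illustrative pairing of a removed and an added expectation:

    // before: validity was implied by mapping the type to a service
    expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();

    // after: the blueprint answers the validity question directly
    expect(blueprint.isValidConfigType("hdfs-site")).andReturn(true).anyTimes();
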
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
index a691cbc..c8d4d55 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
@@ -22,16 +22,12 @@
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
 
 import java.lang.reflect.Field;
@@ -58,7 +54,6 @@
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -67,6 +62,7 @@
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -84,7 +80,7 @@
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterDeployWithStartOnlyTest {
+public class ClusterDeployWithStartOnlyTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -106,7 +102,6 @@
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -161,6 +156,10 @@
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -252,6 +251,7 @@
     expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes();
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes();
     // don't expect toEntity()
 
     expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes();
@@ -290,7 +290,6 @@
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(ProvisionAction.START_ONLY).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -391,7 +390,6 @@
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -401,12 +399,9 @@
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommandInstallComponent3,
-      hostRoleCommandInstallComponent4, hostRoleCommandStartComponent1, hostRoleCommandStartComponent2,
-      serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -419,17 +414,8 @@
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommandInstallComponent3, hostRoleCommandInstallComponent4,
-      hostRoleCommandStartComponent1, hostRoleCommandStartComponent2);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommandInstallComponent3, hostRoleCommandInstallComponent4,
-      hostRoleCommandStartComponent1, hostRoleCommandStartComponent2);
+    verifyAll();
+    resetAll();
   }
 
   @Test
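
The recurring change in this and the next two topology tests is the migration from hand-maintained replay(...)/verify(...)/reset(...) mock lists to EasyMockSupport, whose EasyMockRule registers every @Mock field so the whole set can be driven at once. A minimal sketch of the lifecycle, independent of the Ambari types:

    import static org.easymock.EasyMock.expect;
    import static org.junit.Assert.assertEquals;

    import java.util.concurrent.Callable;

    import org.easymock.EasyMockRule;
    import org.easymock.EasyMockSupport;
    import org.easymock.Mock;
    import org.junit.After;
    import org.junit.Rule;
    import org.junit.Test;

    public class LifecycleExampleTest extends EasyMockSupport {
      @Rule
      public EasyMockRule mocks = new EasyMockRule(this); // registers all @Mock fields

      @Mock
      private Callable<String> collaborator;

      @Test
      public void callsCollaborator() throws Exception {
        expect(collaborator.call()).andReturn("done"); // record
        replayAll();                                   // replay every registered mock
        assertEquals("done", collaborator.call());     // exercise
      }

      @After
      public void tearDown() {
        verifyAll(); // verify every registered mock
        resetAll();  // ready for the next test
      }
    }

The practical win, visible in the tearDown() rewrites in these files, is that adding a new mock no longer requires touching three separate argument lists.
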
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
index 98ba592..4c9815c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
@@ -24,16 +24,12 @@
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -59,7 +55,6 @@
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -68,6 +63,7 @@
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -85,7 +81,7 @@
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterInstallWithoutStartOnComponentLevelTest {
+public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -107,7 +103,7 @@
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
+  //  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -157,6 +153,9 @@
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -248,6 +247,7 @@
     expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes();
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes();
     // don't expect toEntity()
 
     expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes();
@@ -286,7 +286,6 @@
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(INSTALL_AND_START).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -368,7 +367,6 @@
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -378,10 +376,9 @@
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommand, serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -394,15 +391,8 @@
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
+    verifyAll();
+    resetAll();
   }
 
   @Test
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
index fc7ac27..1bdeb1b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
@@ -24,16 +24,12 @@
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.isA;
 import static org.easymock.EasyMock.isNull;
 import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
 
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -59,7 +55,6 @@
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
@@ -68,6 +63,7 @@
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -85,7 +81,7 @@
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(AmbariServer.class)
-public class ClusterInstallWithoutStartTest {
+public class ClusterInstallWithoutStartTest extends EasyMockSupport {
   private static final String CLUSTER_NAME = "test-cluster";
   private static final long CLUSTER_ID = 1;
   private static final String BLUEPRINT_NAME = "test-bp";
@@ -106,8 +102,9 @@
 
   @Mock(type = MockType.NICE)
   private ProvisionClusterRequest request;
+
   private PersistedTopologyRequest persistedTopologyRequest;
-//  @Mock(type = MockType.STRICT)
+  //  @Mock(type = MockType.STRICT)
   private LogicalRequestFactory logicalRequestFactory;
   @Mock(type = MockType.DEFAULT)
   private LogicalRequest logicalRequest;
@@ -157,6 +154,9 @@
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorServiceMock;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
     new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -248,6 +248,7 @@
     expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes();
     expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes();
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes();
     // don't expect toEntity()
 
     expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes();
@@ -286,7 +287,7 @@
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
+
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY);
     expect(request.getProvisionAction()).andReturn(INSTALL_ONLY).anyTimes();
     expect(request.getSecurityConfiguration()).andReturn(null).anyTimes();
@@ -335,7 +336,7 @@
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String)isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) isNull());
     expectLastCall().once();
     expect(ambariContext.getNextRequestId()).andReturn(1L).once();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
@@ -361,7 +362,6 @@
     ambariContext.persistInstallStateForUI(CLUSTER_NAME, STACK_NAME, STACK_VERSION);
     expectLastCall().once();
 
-    expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).times(2);
 
     persistedTopologyRequest = new PersistedTopologyRequest(1, request);
@@ -371,10 +371,9 @@
     persistedState.persistLogicalRequest((LogicalRequest) anyObject(), anyLong());
     expectLastCall().once();
 
-    replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest,
-      configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor,
-      persistedState, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider,
-      mockFuture, managementController, clusters, cluster, hostRoleCommand, serviceComponentInfo, clientComponentInfo);
+    topologyValidatorServiceMock.validateTopologyConfiguration(anyObject(ClusterTopology.class));
+
+    replayAll();
 
     Class clazz = TopologyManager.class;
 
@@ -387,15 +386,8 @@
 
   @After
   public void tearDown() {
-    verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
-
-    reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory,
-      logicalRequest, configurationRequest, configurationRequest2, configurationRequest3,
-      requestStatusResponse, executor, persistedState, mockFuture,
-      managementController, clusters, cluster, hostRoleCommand);
+    verifyAll();
+    resetAll();
   }
 
   @Test
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
index 3ea17b4..606303e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
@@ -19,20 +19,15 @@
 package org.apache.ambari.server.topology;
 
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.notNull;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
-import static org.powermock.api.easymock.PowerMock.createStrictMock;
 import static org.powermock.api.easymock.PowerMock.replay;
 import static org.powermock.api.easymock.PowerMock.reset;
 import static org.powermock.api.easymock.PowerMock.verify;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -56,7 +51,7 @@
   private static final HostGroup group4 = createNiceMock(HostGroup.class);
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
   private final Map<String, HostGroup> hostGroupMap = new HashMap<>();
-  private final List<TopologyValidator> topologyValidators = new ArrayList<>();
+
   private Configuration configuration;
   private Configuration bpconfiguration;
 
@@ -64,9 +59,9 @@
   public void setUp() throws Exception {
 
     configuration = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
     bpconfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
-            new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
 
     HostGroupInfo group1Info = new HostGroupInfo("group1");
     HostGroupInfo group2Info = new HostGroupInfo("group2");
@@ -148,7 +143,7 @@
     verify(blueprint, group1, group2, group3, group4);
     reset(blueprint, group1, group2, group3, group4);
 
-    topologyValidators.clear();
+
     hostGroupInfoMap.clear();
     hostGroupMap.clear();
   }
@@ -157,36 +152,7 @@
     replay(blueprint, group1, group2, group3, group4);
   }
 
-  @Test(expected = InvalidTopologyException.class)
-  public void testCreate_validatorFails() throws Exception {
-    TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
 
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
-
-    validator.validate((ClusterTopology) notNull());
-    expectLastCall().andThrow(new InvalidTopologyException("test"));
-
-    replayAll();
-    replay(validator);
-    // should throw exception due to validation failure
-    new ClusterTopologyImpl(null, request);
-  }
-
-  @Test
-     public void testCreate_validatorSuccess() throws Exception {
-    TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
-
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
-
-    validator.validate((ClusterTopology) notNull());
-
-    replayAll();
-    replay(validator);
-
-    new ClusterTopologyImpl(null, request);
-  }
 
   @Test(expected = InvalidTopologyException.class)
   public void testCreate_duplicateHosts() throws Exception {
@@ -204,16 +170,11 @@
   public void test_GetHostAssigmentForComponents() throws Exception {
     TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION);
 
-    TopologyValidator validator = createStrictMock(TopologyValidator.class);
-    topologyValidators.add(validator);
-
-    validator.validate((ClusterTopology) notNull());
-
     replayAll();
-    replay(validator);
 
     new ClusterTopologyImpl(null, request).getHostAssignmentsForComponent("component1");
   }
+
   @Test(expected = InvalidTopologyException.class)
   public void testCreate_NNHAInvaid() throws Exception {
     bpconfiguration.setProperty("hdfs-site", "dfs.nameservices", "val");
@@ -224,6 +185,7 @@
     new ClusterTopologyImpl(null, request);
     hostGroupInfoMap.get("group4").addHost("host5");
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForStandby() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -234,6 +196,7 @@
     replayAll();
     new ClusterTopologyImpl(null, request);
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForActive() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -244,6 +207,7 @@
     replayAll();
     new ClusterTopologyImpl(null, request);
   }
+
   @Test(expected = IllegalArgumentException.class)
   public void testCreate_NNHAHostNameNotCorrectForStandbyWithActiveAsVariable() throws Exception {
     expect(group4.getName()).andReturn("group4");
@@ -292,11 +256,6 @@
     }
 
     @Override
-    public List<TopologyValidator> getTopologyValidators() {
-      return topologyValidators;
-    }
-
-    @Override
     public String getDescription() {
       return "Test Request";
     }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
index 42ef020..2019082 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
@@ -461,4 +461,68 @@
 
     assertTrue(hostReqHost1.isPresent() && hostReqHost2.isPresent() && hostReqHost3.isPresent() && !hostReqHost4.isPresent());
   }
+
+  @Test
+  public void testRemovePendingHostRequests() throws Exception {
+    // Given
+    Long requestId = 1L;
+
+    final TopologyHostInfoEntity host1 = new TopologyHostInfoEntity();
+    host1.setId(100L);
+    host1.setFqdn("host1");
+
+    final TopologyHostInfoEntity host2 = new TopologyHostInfoEntity();
+    host2.setId(102L);
+    host2.setFqdn("host2");
+
+    TopologyHostGroupEntity hostGroupEntity1 = new TopologyHostGroupEntity();
+    hostGroupEntity1.setTopologyHostInfoEntities(ImmutableSet.of(host1, host2));
+    hostGroupEntity1.setName("host_group_1");
+
+    // host request matched to a registered host
+    TopologyHostRequestEntity hostRequestEntityHost1Matched = new TopologyHostRequestEntity();
+    hostRequestEntityHost1Matched.setId(1L);
+    hostRequestEntityHost1Matched.setHostName(host1.getFqdn()); // host request matched to host1
+    hostRequestEntityHost1Matched.setTopologyHostGroupEntity(hostGroupEntity1);
+    hostRequestEntityHost1Matched.setTopologyHostTaskEntities(Collections.<TopologyHostTaskEntity>emptySet());
+    expect(ambariContext.isHostRegisteredWithCluster(eq(clusterId), eq(host1.getFqdn()))).andReturn(true).anyTimes();
+
+    // host request that hasn't been matched to any registered host yet
+    TopologyHostRequestEntity hostRequestEntityHost2NotMatched = new TopologyHostRequestEntity();
+    hostRequestEntityHost2NotMatched.setId(2L);
+    hostRequestEntityHost2NotMatched.setTopologyHostGroupEntity(hostGroupEntity1);
+    hostRequestEntityHost2NotMatched.setTopologyHostTaskEntities(Collections.<TopologyHostTaskEntity>emptySet());
+    expect(ambariContext.isHostRegisteredWithCluster(eq(clusterId), eq(host2.getFqdn()))).andReturn(false).anyTimes();
+
+    Collection<TopologyHostRequestEntity> reservedHostRequestEntities = ImmutableSet.of(
+            hostRequestEntityHost1Matched,
+            hostRequestEntityHost2NotMatched);
+
+    hostGroupEntity1.setTopologyHostRequestEntities(reservedHostRequestEntities);
+
+    TopologyRequestEntity topologyRequestEntity = new TopologyRequestEntity();
+    topologyRequestEntity.setTopologyHostGroupEntities(Collections.singleton(hostGroupEntity1));
+
+    expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).atLeastOnce();
+    expect(logicalRequestEntity.getTopologyHostRequestEntities()).andReturn(reservedHostRequestEntities).atLeastOnce();
+    expect(blueprint.getHostGroup(eq("host_group_1"))).andReturn(hostGroup1).atLeastOnce();
+    expect(hostGroup1.containsMasterComponent()).andReturn(false).atLeastOnce();
+
+    replayAll();
+
+    // When
+
+    LogicalRequest req = new LogicalRequest(requestId, replayedTopologyRequest, clusterTopology, logicalRequestEntity);
+    req.removePendingHostRequests(null);
+
+    // Then
+    verifyAll();
+
+    Collection<HostRequest> hostRequests = req.getHostRequests();
+    assertEquals(1, hostRequests.size());
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
index 4c88247..efceef3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
@@ -20,10 +20,6 @@
 
 import static junit.framework.Assert.assertEquals;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.powermock.api.easymock.PowerMock.createNiceMock;
-import static org.powermock.api.easymock.PowerMock.verify;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -35,20 +31,37 @@
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.topology.validators.RequiredPasswordValidator;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.TestSubject;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 
 /**
  * Unit tests for RequiredPasswordValidator.
  */
-public class RequiredPasswordValidatorTest {
+public class RequiredPasswordValidatorTest extends EasyMockSupport {
 
-  private static final ClusterTopology topology = createNiceMock(ClusterTopology.class);
-  private static final Blueprint blueprint = createNiceMock(Blueprint.class);
-  private static final Stack stack = createNiceMock(Stack.class);
-  private static final HostGroup group1 = createNiceMock(HostGroup.class);
-  private static final HostGroup group2 = createNiceMock(HostGroup.class);
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+
+  @Mock
+  private ClusterTopology topology;
+
+  @Mock
+  private Blueprint blueprint;
+
+  @Mock
+  private Stack stack;
+
+  @Mock
+  private HostGroup group1;
+
+  @Mock
+  private HostGroup group2;
 
   private static Configuration stackDefaults;
   private static Configuration bpClusterConfig;
@@ -71,30 +84,33 @@
   private static final Collection<Stack.ConfigProperty> service2RequiredPwdConfigs = new HashSet<>();
   private static final Collection<Stack.ConfigProperty> service3RequiredPwdConfigs = new HashSet<>();
 
+  @TestSubject
+  private RequiredPasswordValidator validator = new RequiredPasswordValidator();
+
 
   @Before
   public void setup() {
 
     stackDefaults = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>());
+      new HashMap<String, Map<String, Map<String, String>>>());
 
     bpClusterConfig = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), stackDefaults);
+      new HashMap<String, Map<String, Map<String, String>>>(), stackDefaults);
 
     topoClusterConfig = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpClusterConfig);
 
     bpGroup1Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
 
     bpGroup2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
+      new HashMap<String, Map<String, Map<String, String>>>(), topoClusterConfig);
 
     topoGroup1Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpGroup1Config);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpGroup1Config);
 
     topoGroup2Config = new Configuration(new HashMap<String, Map<String, String>>(),
-        new HashMap<String, Map<String, Map<String, String>>>(), bpGroup2Config);
+      new HashMap<String, Map<String, Map<String, String>>>(), bpGroup2Config);
 
     service1RequiredPwdConfigs.clear();
     service2RequiredPwdConfigs.clear();
@@ -149,45 +165,57 @@
     expect(stack.getRequiredConfigurationProperties("service2", PropertyInfo.PropertyType.PASSWORD)).andReturn(service2RequiredPwdConfigs).anyTimes();
     expect(stack.getRequiredConfigurationProperties("service3", PropertyInfo.PropertyType.PASSWORD)).andReturn(service3RequiredPwdConfigs).anyTimes();
 
-    replay(topology, blueprint, stack, group1, group2);
   }
 
   @After
   public void tearDown() {
-    verify(topology, blueprint, stack, group1, group2);
-    reset(topology, blueprint, stack, group1, group2);
+    verifyAll();
+    resetAll();
   }
 
 
   @Test
   public void testValidate_noRequiredProps__noDefaultPwd() throws Exception {
-    TopologyValidator validator = new RequiredPasswordValidator(null);
+    // GIVEN
     // no required pwd properties so shouldn't throw an exception
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
+    // WHEN
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_noRequiredProps__defaultPwd() throws Exception {
-    TopologyValidator validator = new RequiredPasswordValidator("pwd");
-    // no required pwd properties so shouldn't throw an exception
+    // GIVEN
+    expect(topology.getDefaultPassword()).andReturn("pwd");
+    replayAll();
+
+    // WHEN
     validator.validate(topology);
   }
 
   @Test(expected = InvalidTopologyException.class)
   public void testValidate_missingPwd__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service1RequiredPwdConfigs.add(pwdProp);
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_missingPwd__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service1RequiredPwdConfigs.add(pwdProp);
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 
@@ -197,62 +225,78 @@
 
   @Test
   public void testValidate_pwdPropertyInTopoGroupConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     topoGroup2Config.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInTopoClusterConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     topoClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInBPGroupConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     bpGroup2Config.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_pwdPropertyInBPClusterConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     bpClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     validator.validate(topology);
   }
 
   @Test(expected = InvalidTopologyException.class)
   public void testValidate_pwdPropertyInStackConfig__NoDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     service3RequiredPwdConfigs.add(pwdProp);
     // group2 has a component from service 3
     stackDefaults.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     // because stack config is ignored for validation, an exception should be thrown
     validator.validate(topology);
   }
 
   @Test
   public void testValidate_twoRequiredPwdOneSpecified__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
@@ -260,7 +304,6 @@
 
     topoClusterConfig.getProperties().put("test2-type", Collections.singletonMap("pwdProp2", "secret"));
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 
@@ -271,6 +314,9 @@
 
   @Test
   public void testValidate_twoRequiredPwdTwoSpecified__noDefaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn(null);
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
@@ -279,7 +325,6 @@
     topoClusterConfig.getProperties().put("test2-type", Collections.singletonMap("pwdProp2", "secret2"));
     topoClusterConfig.getProperties().put("test-type", Collections.singletonMap("pwdProp", "secret1"));
 
-    TopologyValidator validator = new RequiredPasswordValidator(null);
     // default value should be set
     validator.validate(topology);
 
@@ -290,12 +335,14 @@
 
   @Test
   public void testValidate_multipleMissingPwd__defaultPwd() throws Exception {
+    expect(topology.getDefaultPassword()).andReturn("default-pwd");
+    replayAll();
+
     Stack.ConfigProperty pwdProp = new Stack.ConfigProperty("test-type", "pwdProp", null);
     Stack.ConfigProperty pwdProp2 = new Stack.ConfigProperty("test2-type", "pwdProp2", null);
     service1RequiredPwdConfigs.add(pwdProp);
     service3RequiredPwdConfigs.add(pwdProp2);
 
-    TopologyValidator validator = new RequiredPasswordValidator("default-pwd");
     // default value should be set
     validator.validate(topology);
 
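
The RequiredPasswordValidatorTest rewrite above swaps the old constructor argument for a topology.getDefaultPassword() lookup, and builds the subject once as a @TestSubject field (EasyMockRule injects @Mock fields into a @TestSubject where field types match; here the topology is simply passed to validate()). The resulting test shape, condensed from the pattern above:

    @Rule
    public EasyMockRule mocks = new EasyMockRule(this);

    @Mock
    private ClusterTopology topology;

    @TestSubject
    private RequiredPasswordValidator validator = new RequiredPasswordValidator();

    @Test
    public void noRequiredPasswords() throws Exception {
      expect(topology.getDefaultPassword()).andReturn(null); // was: new RequiredPasswordValidator(null)
      replayAll();
      validator.validate(topology); // no required passwords, so this must not throw
    }
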
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index 10406f1..02cc64f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -66,6 +66,7 @@
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
+import org.apache.ambari.server.topology.validators.TopologyValidatorService;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
@@ -159,6 +160,9 @@
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
 
+  @Mock
+  private TopologyValidatorService topologyValidatorService;
+
   private final Configuration stackConfig = new Configuration(new HashMap<String, Map<String, String>>(),
       new HashMap<String, Map<String, Map<String, String>>>());
   private final Configuration bpConfiguration = new Configuration(new HashMap<String, Map<String, String>>(),
@@ -279,7 +283,6 @@
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
     expect(request.getConfiguration()).andReturn(topoConfiguration).anyTimes();
     expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
-    expect(request.getTopologyValidators()).andReturn(topologyValidators).anyTimes();
 
     expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
 
@@ -392,7 +395,7 @@
     Map<ClusterTopology, List<LogicalRequest>> allRequests = new HashMap<>();
     List<LogicalRequest> requestList = new ArrayList<>();
     requestList.add(logicalRequest);
-    expect(logicalRequest.hasCompleted()).andReturn(true).anyTimes();
+    expect(logicalRequest.hasPendingHostRequests()).andReturn(false).anyTimes();
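+    // TopologyManager presumably now consults hasPendingHostRequests() instead of hasCompleted()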
     allRequests.put(clusterTopologyMock, requestList);
     expect(requestStatusResponse.getTasks()).andReturn(Collections.<ShortTaskStatus>emptyList()).anyTimes();
     expect(clusterTopologyMock.isClusterKerberosEnabled()).andReturn(true);
@@ -401,8 +404,8 @@
     expect(persistedState.getAllRequests()).andReturn(allRequests).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
     expect(ambariContext.isTopologyResolved(CLUSTER_ID)).andReturn(true).anyTimes();
-    expect(group1.addComponent("KERBEROS_CLIENT")).andReturn(true);
-    expect(group2.addComponent("KERBEROS_CLIENT")).andReturn(true);
+    expect(group1.addComponent("KERBEROS_CLIENT")).andReturn(true).anyTimes();
+    expect(group2.addComponent("KERBEROS_CLIENT")).andReturn(true).anyTimes();
 
     replayAll();
 
@@ -511,7 +514,8 @@
     allRequests.put(clusterTopologyMock, logicalRequests);
     expect(persistedState.getAllRequests()).andReturn(allRequests).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
-    expect(logicalRequest.hasCompleted()).andReturn(true).anyTimes();
+    expect(logicalRequest.hasPendingHostRequests()).andReturn(true).anyTimes();
+    expect(logicalRequest.getCompletedHostRequests()).andReturn(Collections.EMPTY_LIST).anyTimes();
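+    // with pending host requests outstanding, the (empty) completed-host-request list is presumably what gets inspected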
     expect(requestStatusResponse.getTasks()).andReturn(tasks).anyTimes();
     replayAll();
     EasyMock.replay(clusterTopologyMock);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
index 745b01b..3308333 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
@@ -122,6 +122,7 @@
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
     EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(Collections.<String>emptyList()).anyTimes();
     EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
@@ -140,9 +141,11 @@
   public void testShouldValidationPassWhenDefaultsAreUsedAndMsqlComponentIsListed() throws Exception {
     // GIVEN
     Collection<String> blueprintServices = Arrays.asList("HIVE", "HDFS", "MYSQL_SERVER");
+    Collection<String> hiveComponents = Arrays.asList("MYSQL_SERVER");
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
     EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(hiveComponents).anyTimes();
     EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java
new file mode 100644
index 0000000..8ead623
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java
@@ -0,0 +1,302 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.HostGroup;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.TestSubject;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
+
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+
+  @Mock
+  private ClusterTopology clusterTopologyMock;
+
+  @Mock
+  private Configuration topologyConfigurationMock;
+
+  @Mock
+  private Blueprint blueprintMock;
+
+  @Mock
+  private Stack stackMock;
+
+  @Mock
+  private HostGroup slaveHostGroupMock;
+
+  @Mock
+  private HostGroup masterHostGroupMock;
+
+  @Mock
+  private Configuration slaveHostGroupConfigurationMock;
+
+  @Mock
+  private Configuration masterHostGroupConfigurationMock;
+
+
+  private Map<String, Map<String, String>> topologyConfigurationMap = new HashMap<>();
+  private Map<String, Map<String, String>> masterHostGroupConfigurationMap = new HashMap<>();
+  private Map<String, Map<String, String>> slaveHostGroupConfigurationMap = new HashMap<>();
+  private Collection<String> bpServices = new HashSet<>();
+  private Collection<String> slaveHostGroupServices = new HashSet<>();
+  private Collection<String> masterHostGroupServices = new HashSet<>();
+  private Map<String, HostGroup> hostGroups = new HashMap<>();
+  private Map<String, Collection<String>> missingProps = new HashMap<>();
+
+  @TestSubject
+  private RequiredConfigPropertiesValidator testSubject = new RequiredConfigPropertiesValidator();
+
+  /**
+   * Assembles the basic default fixture of the test:
+   * the blueprint has two host groups, both of which contain the KERBEROS service (or KERBEROS_CLIENT).
+   * The varying items are the configurations, which may come from the topology (bp + cct) or the host groups.
+   */
+  @Before
+  public void setup() {
+    resetAll();
+
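+    // hand out the resolved topology configuration; getFullProperties(1) is assumed to merge one level of parent configuration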
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(topologyConfigurationMock);
+    EasyMock.expect(topologyConfigurationMock.getFullProperties(1)).andReturn(topologyConfigurationMap);
+
+    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
+
+    EasyMock.expect(blueprintMock.getHostGroups()).andReturn(hostGroups);
+    EasyMock.expect(blueprintMock.getServices()).andReturn(bpServices);
+    EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes();
+
+    EasyMock.expect(masterHostGroupMock.getName()).andReturn("master").anyTimes();
+    EasyMock.expect(masterHostGroupMock.getConfiguration()).andReturn(masterHostGroupConfigurationMock).anyTimes();
+    EasyMock.expect(masterHostGroupMock.getServices()).andReturn(masterHostGroupServices);
+
+
+    EasyMock.expect(slaveHostGroupMock.getName()).andReturn("slave").anyTimes();
+    EasyMock.expect(slaveHostGroupMock.getConfiguration()).andReturn(slaveHostGroupConfigurationMock).anyTimes();
+    EasyMock.expect(slaveHostGroupMock.getServices()).andReturn(slaveHostGroupServices);
+
+    // there are 2 hostgroups to be considered by the test
+    hostGroups.put("master", masterHostGroupMock);
+    hostGroups.put("slave", slaveHostGroupMock);
+
+    // services in the blueprint
+    bpServices.addAll(Arrays.asList("KERBEROS", "OOZIE"));
+
+    // host group services
+    masterHostGroupServices.addAll(Arrays.asList("KERBEROS"));
+    slaveHostGroupServices.addAll(Arrays.asList("KERBEROS"));
+
+    EasyMock.expect(masterHostGroupConfigurationMock.getProperties()).andReturn(masterHostGroupConfigurationMap);
+    EasyMock.expect(slaveHostGroupConfigurationMock.getProperties()).andReturn(slaveHostGroupConfigurationMap);
+
+    // required properties for listed services
+    EasyMock.expect(stackMock.getRequiredConfigurationProperties("KERBEROS")).
+      andReturn(Arrays.asList(
+        new Stack.ConfigProperty("kerberos-env", "realm", "value"),
+        new Stack.ConfigProperty("kerberos-env", "kdc_type", "value"), // this is missing!
+        new Stack.ConfigProperty("krb5-conf", "domains", "smthg")));
+
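+    // OOZIE declares no required properties, so only the KERBEROS requirements drive the validation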
+    EasyMock.expect(stackMock.getRequiredConfigurationProperties("OOZIE")).andReturn(Collections.EMPTY_LIST);
+
+  }
+
+  @Test
+  public void testShouldValidationFailWhenNoHostGroupConfigurationProvidedAndRequiredConfigTypesAreMissing() throws Exception {
+
+    // GIVEN
+    // all of the configuration comes from the bp / cct; the host group configs are empty
+    topologyConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    topologyConfigurationMap.get("kerberos-env").put("realm", "etwas");
+    topologyConfigurationMap.get("kerberos-env").put("kdc_type", "mit-kdc");
+
+    // note that the krb5-conf config type is missing (see the required properties in the fixture)
+    missingProps.put("slave", Arrays.asList("domains"));
+    missingProps.put("master", Arrays.asList("domains"));
+
+    replayAll();
+
+    // WHEN
+    String expectedMsg = String.format("Missing required properties.  Specify a value for these properties in the blueprint or cluster creation template configuration. %s", missingProps);
+    String actualMsg = "";
+    try {
+      testSubject.validate(clusterTopologyMock);
+    } catch (InvalidTopologyException e) {
+      actualMsg = e.getMessage();
+    }
+
+    // THEN
+    // an exception is thrown, as the krb5-conf type is not provided
+    Assert.assertEquals("The exception message should be the expected one", expectedMsg, actualMsg);
+  }
+
+  @Test
+  public void testShouldValidationFailWhenNoHostGroupConfigurationProvidedAndRequiredPropertiesAreMissing() throws Exception {
+    // GIVEN
+
+    // configuration from the blueprint / cluster creation template
+    topologyConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    topologyConfigurationMap.get("kerberos-env").put("realm", "etwas");
+
+    // note that the kdc_type is missing from the operational config
+
+    topologyConfigurationMap.put("krb5-conf", new HashMap<String, String>());
+    topologyConfigurationMap.get("krb5-conf").put("domains", "smthg");
+
+    missingProps.put("master", Arrays.asList("kdc_type"));
+    missingProps.put("slave", Arrays.asList("kdc_type"));
+
+    replayAll();
+
+    // WHEN
+    String expectedMsg = String.format("Missing required properties.  Specify a value for these properties in the blueprint or cluster creation template configuration. %s", missingProps);
+    String actualMsg = "";
+    try {
+      testSubject.validate(clusterTopologyMock);
+    } catch (InvalidTopologyException e) {
+      actualMsg = e.getMessage();
+    }
+
+    // THEN
+    // an exception is thrown, as the kdc_type property is not provided
+    Assert.assertEquals("The exception message should be the expected one", expectedMsg, actualMsg);
+
+  }
+
+
+  @Test
+  public void testShouldValidationFailWhenHostGroupConfigurationProvidedAndRequiredConfigTypesAreMissingFromBothHostgroups() throws Exception {
+    // GIVEN
+    // configuration is expected from the host groups, but the required config types are missing from both
+
+    missingProps.put("master", Arrays.asList("kdc_type", "domains", "realm"));
+    missingProps.put("slave", Arrays.asList("kdc_type", "domains", "realm"));
+
+    replayAll();
+
+    // WHEN
+    String expectedMsg = String.format("Missing required properties.  Specify a value for these properties in the blueprint or cluster creation template configuration. %s", missingProps);
+    String actualMsg = "";
+    try {
+      testSubject.validate(clusterTopologyMock);
+    } catch (InvalidTopologyException e) {
+      actualMsg = e.getMessage();
+    }
+
+    // THEN
+    // an exception is thrown, as the required config types are missing from both host groups
+    Assert.assertEquals("The exception message should be the expected one", expectedMsg, actualMsg);
+  }
+
+  @Test
+  public void testShouldValidationFailWhenHostGroupConfigurationProvidedAndRequiredConfigTypesAreMissingFromSlaveHostgroup() throws Exception {
+    // GIVEN
+    // configuration comes in the host groups; the required config types are missing from the slave host group
+    masterHostGroupConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    masterHostGroupConfigurationMap.get("kerberos-env").put("realm", "etwas");
+    masterHostGroupConfigurationMap.get("kerberos-env").put("kdc_type", "mit-kdc");
+    masterHostGroupConfigurationMap.put("krb5-conf", new HashMap<String, String>());
+    masterHostGroupConfigurationMap.get("krb5-conf").put("domains", "smthg");
+
+    missingProps.put("slave", Arrays.asList("kdc_type", "domains", "realm"));
+
+    replayAll();
+
+    // WHEN
+    String expectedMsg = String.format("Missing required properties.  Specify a value for these properties in the blueprint or cluster creation template configuration. %s", missingProps);
+    String actualMsg = "";
+    try {
+      testSubject.validate(clusterTopologyMock);
+    } catch (InvalidTopologyException e) {
+      actualMsg = e.getMessage();
+    }
+
+    // THEN
+    // an exception is thrown, as the required properties are missing from the slave host group
+    Assert.assertEquals("The exception message should be the expected one", expectedMsg, actualMsg);
+  }
+
+  @Test
+  public void testShouldValidationPassWhenAllRequiredPropertiesAreProvidedInHostGroupConfiguration() throws Exception {
+    // GIVEN
+
+    masterHostGroupConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    masterHostGroupConfigurationMap.get("kerberos-env").put("realm", "etwas");
+    masterHostGroupConfigurationMap.get("kerberos-env").put("kdc_type", "mit-kdc");
+    masterHostGroupConfigurationMap.put("krb5-conf", new HashMap<String, String>());
+    masterHostGroupConfigurationMap.get("krb5-conf").put("domains", "smthg");
+
+    slaveHostGroupConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    slaveHostGroupConfigurationMap.get("kerberos-env").put("realm", "etwas");
+    slaveHostGroupConfigurationMap.get("kerberos-env").put("kdc_type", "mit-kdc");
+    slaveHostGroupConfigurationMap.put("krb5-conf", new HashMap<String, String>());
+    slaveHostGroupConfigurationMap.get("krb5-conf").put("domains", "smthg");
+
+    replayAll();
+
+    // WHEN
+
+    testSubject.validate(clusterTopologyMock);
+
+    // THEN
+    // no exceptions thrown
+
+  }
+
+
+  @Test
+  public void testShouldValidationPassWhenAllRequiredPropertiesAreProvidedInTopologyConfiguration() throws Exception {
+    // GIVEN
+    // configuration from the blueprint / cluster creation template
+    topologyConfigurationMap.put("kerberos-env", new HashMap<String, String>());
+    topologyConfigurationMap.get("kerberos-env").put("realm", "etwas");
+    topologyConfigurationMap.get("kerberos-env").put("kdc_type", "value");
+
+    topologyConfigurationMap.put("krb5-conf", new HashMap<String, String>());
+    topologyConfigurationMap.get("krb5-conf").put("domains", "smthg");
+
+    replayAll();
+
+    // WHEN
+    testSubject.validate(clusterTopologyMock);
+
+    // THEN
+    // no exceptions thrown
+
+  }
+
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
new file mode 100644
index 0000000..4a70448
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.TestSubject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class StackConfigTypeValidatorTest extends EasyMockSupport {
+
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+
+  @Mock
+  private Configuration clusterConfigurationMock;
+
+  @Mock
+  private Configuration stackConfigurationMock;
+
+  @Mock
+  private Blueprint blueprintMock;
+
+  @Mock
+  private Stack stackMock;
+
+  @Mock
+  private ClusterTopology clusterTopologyMock;
+
+  private Set<String> clusterRequestConfigTypes;
+
+  @TestSubject
+  private StackConfigTypeValidator stackConfigTypeValidator = new StackConfigTypeValidator();
+
+  @Before
+  public void before() {
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(clusterConfigurationMock).anyTimes();
+    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
+
+    EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes();
+  }
+
+  @After
+  public void after() {
+    resetAll();
+  }
+
+
+  @Test(expected = InvalidTopologyException.class)
+  public void testShouldValidationFailWhenUnknownConfigTypeComesIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site")));
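+    // "invalid-site" is not among the stack's config types, so validation is expected to fail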
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // exception is thrown
+
+  }
+
+  @Test
+  public void testShouldValidationPassIfNoConfigTypesComeIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Collections.<String>emptyList()));
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // no exception is thrown
+
+  }
+
+  @Test(expected = InvalidTopologyException.class)
+  public void testShouldValidationFailIfMultipleInvalidConfigTypesComeIn() throws Exception {
+    // GIVEN
+    EasyMock.expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock);
+    EasyMock.expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site")));
+    EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site-1", "invalid-default")));
+
+    replayAll();
+
+    // WHEN
+    stackConfigTypeValidator.validate(clusterTopologyMock);
+
+    // THEN
+    // an exception is thrown, as declared by the @Test annotation
+
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/SectionDDL.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/SectionDDL.java
index bdb8cd9..987a922 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/SectionDDL.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/SectionDDL.java
@@ -33,12 +33,12 @@
    * @param dbAccessor
    * @throws SQLException
    */
-  public void execute(DBAccessor dbAccessor) throws SQLException;
+  void execute(DBAccessor dbAccessor) throws SQLException;
 
   /**
    * Retrieve the capture groups and make assertions about tables/columns created.
    * @param dbAccessor
    * @throws SQLException
    */
-  public void verify(DBAccessor dbAccessor) throws SQLException;
+  void verify(DBAccessor dbAccessor) throws SQLException;
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
index 3743b53..896602b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
@@ -52,8 +52,6 @@
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -69,6 +67,7 @@
 import org.easymock.TestSubject;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
@@ -114,15 +113,11 @@
   private ResultSet resultSet;
 
   @TestSubject
-  private UpgradeCatalog212 testSubject = new UpgradeCatalog212();
+  private UpgradeCatalog212 testSubject = new UpgradeCatalog212(
+      EasyMock.createNiceMock(Injector.class));
 
-
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
-
-
-  // This method to be called only when an IOC is needed - typically by functional tests
-  public void setupIoCContext() {
+  @Before
+  public void setUp() {
     reset(entityManagerProvider);
     expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
     replay(entityManagerProvider);
@@ -130,12 +125,8 @@
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
 
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
     // inject AmbariMetaInfo to ensure that stacks get populated in the DB
     injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
   }
 
   @After
@@ -148,7 +139,6 @@
 
   @Test
   public void testFinilizeTopologyDDL() throws Exception {
-    setupIoCContext();
     final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
     dbAccessor.dropColumn(eq("topology_request"), eq("cluster_name"));
     dbAccessor.setColumnNullable(eq("topology_request"), eq("cluster_id"), eq(false));
@@ -173,7 +163,6 @@
 
   @Test
   public void testExecuteDDLUpdates() throws Exception {
-    setupIoCContext();
     final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
     Configuration configuration = createNiceMock(Configuration.class);
     Connection connection = createNiceMock(Connection.class);
@@ -211,7 +200,6 @@
 
   @Test
   public void testExecuteDMLUpdates() throws Exception {
-    setupIoCContext();
     Method addMissingConfigs = UpgradeCatalog212.class.getDeclaredMethod("addMissingConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
 
@@ -235,7 +223,6 @@
 
   @Test
   public void testUpdateHBaseAdnClusterConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -302,7 +289,6 @@
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsTrue() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -355,7 +341,6 @@
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsNoHBaseEnv() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -397,7 +382,6 @@
 
   @Test
   public void testUpdateHBaseAdnClusterConfigsNoOverrideHBaseUID() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -415,9 +399,6 @@
     expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
     final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
 
-    final Map<String, String> propertiesExpectedHbaseEnv = new HashMap<String, String>() {{
-      put("hbase_user", "hbase");
-    }};
     final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
       put("override_uid", "false");
     }};
@@ -451,17 +432,14 @@
 
   @Test
   public void testUpdateHiveConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
 
     final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
     final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
     final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
 
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<>();
     final Map<String, String> propertiesExpectedHiveSite = new HashMap<String, String>() {{
       put("hive.heapsize", "512");
       put("hive.server2.custom.authentication.class", "");
@@ -498,7 +476,6 @@
 
   @Test
   public void testUpdateOozieConfigs() throws Exception {
-    setupIoCContext();
     EasyMockSupport easyMockSupport = new EasyMockSupport();
     final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
     final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
@@ -539,7 +516,6 @@
 
   @Test
   public void testUpdateHiveEnvContent() throws Exception {
-    setupIoCContext();
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override
       protected void configure() {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index e31a428..70673f8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -93,6 +93,7 @@
 import org.apache.ambari.server.orm.entities.WidgetEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.security.authorization.User;
+import org.apache.ambari.server.security.authorization.UserName;
 import org.apache.ambari.server.security.authorization.Users;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.AlertFirmness;
@@ -2579,7 +2580,7 @@
     expect(requestScheduleDAO.findAll()).andReturn(Collections.singletonList(requestScheduleEntity)).once();
 
     UserEntity userEntity = new UserEntity();
-    userEntity.setUserName("createdUser");
+    userEntity.setUserName(UserName.fromString("createdUser"));
     userEntity.setUserId(1);
     userEntity.setPrincipal(new PrincipalEntity());
     User user = new User(userEntity);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 5251c56..084a489 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -95,7 +95,6 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
-
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
 import com.google.gson.JsonPrimitive;
@@ -386,12 +385,11 @@
     Method updateAmsConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAMSConfigs");
     Method updateHadoopEnvConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHadoopEnvConfigs");
     Method updateKafkaConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateKafkaConfigs");
-    Method updateHiveLlapConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHiveLlapConfigs");
+    Method updateTablesForZeppelinViewRemoval = UpgradeCatalog250.class.getDeclaredMethod("unInstallAllZeppelinViews");
     Method updateHIVEInteractiveConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHIVEInteractiveConfigs");
     Method addManageServiceAutoStartPermissions = UpgradeCatalog250.class.getDeclaredMethod("addManageServiceAutoStartPermissions");
     Method addManageAlertNotificationsPermissions = UpgradeCatalog250.class.getDeclaredMethod("addManageAlertNotificationsPermissions");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateTablesForZeppelinViewRemoval = UpgradeCatalog250.class.getDeclaredMethod("updateTablesForZeppelinViewRemoval");
     Method updateZeppelinConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateZeppelinConfigs");
     Method updateAtlasConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAtlasConfigs");
     Method updateLogSearchConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateLogSearchConfigs");
@@ -409,7 +407,6 @@
         .addMockedMethod(updateAmsConfigs)
         .addMockedMethod(updateHadoopEnvConfigs)
         .addMockedMethod(updateKafkaConfigs)
-        .addMockedMethod(updateHiveLlapConfigs)
         .addMockedMethod(addNewConfigurationsFromXml)
         .addMockedMethod(addManageServiceAutoStartPermissions)
         .addMockedMethod(addManageAlertNotificationsPermissions)
@@ -444,10 +441,7 @@
     upgradeCatalog250.updateHIVEInteractiveConfigs();
     expectLastCall().once();
 
-    upgradeCatalog250.updateHiveLlapConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateTablesForZeppelinViewRemoval();
+    upgradeCatalog250.unInstallAllZeppelinViews();
     expectLastCall().once();
 
     upgradeCatalog250.updateZeppelinConfigs();
@@ -1704,19 +1698,6 @@
         "hive.metastore.heapsize", "512",
         "hive_ambari_database", "MySQL");
 
-    Map<String, String> oldHiveIntSite = ImmutableMap.of(
-        "hive.llap.daemon.rpc.port", "15001");
-
-    Map<String, String> expectedHiveIntSite = ImmutableMap.of(
-        "hive.llap.daemon.rpc.port", "0",
-        "hive.auto.convert.join.noconditionaltask.size", "1000000000");
-
-    Config mockHsiSite = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("hive-interactive-site")).andReturn(mockHsiSite).atLeastOnce();
-    expect(mockHsiSite.getProperties()).andReturn(oldHiveIntSite).anyTimes();
-    Capture<Map<String, String>> hsiSiteCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(hsiSiteCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
 
     Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
     expect(cluster.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
@@ -1731,13 +1712,10 @@
 
     replay(clusters, cluster);
     replay(controller, injector2);
-    replay(mockHsiEnv, mockHiveEnv, mockHsiSite);
+    replay(mockHsiEnv, mockHiveEnv);
     new UpgradeCatalog250(injector2).updateHIVEInteractiveConfigs();
     easyMockSupport.verifyAll();
 
-    Map<String, String> updatedHsiSite = hsiSiteCapture.getValue();
-    assertTrue(Maps.difference(expectedHiveIntSite, updatedHsiSite).areEqual());
-
     Map<String, String> updatedHsiEnv = hsiEnvCapture.getValue();
     assertTrue(Maps.difference(expectedHsiEnv, updatedHsiEnv).areEqual());
   }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
new file mode 100644
index 0000000..fda5f0e
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.Capture;
+import org.easymock.EasyMockRunner;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.MockType;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.security.crypto.password.PasswordEncoder;
+
+import com.google.gson.Gson;
+import com.google.inject.AbstractModule;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.Provider;
+
+/**
+ * {@link UpgradeCatalog251} unit tests.
+ */
+@RunWith(EasyMockRunner.class)
+public class UpgradeCatalog251Test {
+
+  @Mock(type = MockType.STRICT)
+  private Provider<EntityManager> entityManagerProvider;
+
+  @Mock(type = MockType.NICE)
+  private EntityManager entityManager;
+
+  @Mock(type = MockType.NICE)
+  private DBAccessor dbAccessor;
+
+  @Mock(type = MockType.NICE)
+  private Configuration configuration;
+
+  @Mock(type = MockType.NICE)
+  private Connection connection;
+
+  @Mock(type = MockType.NICE)
+  private Statement statement;
+
+  @Mock(type = MockType.NICE)
+  private ResultSet resultSet;
+
+  @Mock(type = MockType.NICE)
+  private OsFamily osFamily;
+
+  @Mock(type = MockType.NICE)
+  private KerberosHelper kerberosHelper;
+
+  @Mock(type = MockType.NICE)
+  private ActionManager actionManager;
+
+  @Mock(type = MockType.NICE)
+  private Config config;
+
+  @Mock(type = MockType.STRICT)
+  private Service service;
+
+  @Mock(type = MockType.NICE)
+  private Clusters clusters;
+
+  @Mock(type = MockType.NICE)
+  private Cluster cluster;
+
+  @Mock(type = MockType.NICE)
+  private Injector injector;
+
+  @Before
+  public void init() {
+    reset(entityManagerProvider, injector);
+
+    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
+
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper).anyTimes();
+
+    replay(entityManagerProvider, injector);
+  }
+
+  @After
+  public void tearDown() {
+  }
+
+  @Test
+  public void testExecuteDDLUpdates() throws Exception {
+    Capture<DBColumnInfo> hrcBackgroundColumnCapture = newCapture();
+    dbAccessor.addColumn(eq(UpgradeCatalog251.HOST_ROLE_COMMAND_TABLE), capture(hrcBackgroundColumnCapture));
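+    // capture the column added to host_role_command so its name, default and type can be asserted below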
+
+    expect(dbAccessor.getConnection()).andReturn(connection).anyTimes();
+    expect(connection.createStatement()).andReturn(statement).anyTimes();
+    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet).anyTimes();
+    expect(configuration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
+
+    replay(dbAccessor, configuration, connection, statement, resultSet);
+
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(osFamily);
+        binder.bind(EntityManager.class).toInstance(entityManager);
+        binder.bind(Configuration.class).toInstance(configuration);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog251 upgradeCatalog251 = injector.getInstance(UpgradeCatalog251.class);
+    upgradeCatalog251.executeDDLUpdates();
+
+    verify(dbAccessor);
+
+    DBColumnInfo captured = hrcBackgroundColumnCapture.getValue();
+    Assert.assertEquals(UpgradeCatalog251.HRC_IS_BACKGROUND_COLUMN, captured.getName());
+    Assert.assertEquals(Integer.valueOf(0), captured.getDefaultValue());
+    Assert.assertEquals(Short.class, captured.getType());
+  }
+
+  @Test
+  public void testExecuteDMLUpdates() throws Exception {
+    Method updateKAFKAConfigs = UpgradeCatalog251.class.getDeclaredMethod("updateKAFKAConfigs");
+    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .addMockedMethod(updateKAFKAConfigs)
+        .addMockedMethod(addNewConfigurationsFromXml)
+        .createMock();
+
+    upgradeCatalog251.addNewConfigurationsFromXml();
+    expectLastCall().once();
+
+    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
+    field.set(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.updateKAFKAConfigs();
+    expectLastCall().once();
+
+    replay(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.executeDMLUpdates();
+
+    verify(upgradeCatalog251, dbAccessor);
+  }
+
+
+  @Test
+  public void testUpdateKAFKAConfigs() throws Exception{
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    Map<String, String> initialProperties = Collections.singletonMap("listeners", "PLAINTEXT://localhost:6667,SSL://localhost:6666");
+    Map<String, String> expectedUpdates = Collections.singletonMap("listeners", "PLAINTEXTSASL://localhost:6667,SSL://localhost:6666");
+
+    final Config kafkaBroker = easyMockSupport.createNiceMock(Config.class);
+    expect(kafkaBroker.getProperties()).andReturn(initialProperties).times(1);
+    // Re-entrant test
+    expect(kafkaBroker.getProperties()).andReturn(expectedUpdates).times(1);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).atLeastOnce();
+    expect(mockClusters.getClusters()).andReturn(Collections.singletonMap("normal", mockClusterExpected)).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(kafkaBroker).atLeastOnce();
+    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).atLeastOnce();
+    expect(mockClusterExpected.getServices()).andReturn(Collections.<String, Service>singletonMap("KAFKA", null)).atLeastOnce();
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .withConstructor(Injector.class)
+        .withArgs(mockInjector)
+        .addMockedMethod("updateConfigurationProperties", String.class,
+            Map.class, boolean.class, boolean.class)
+        .createMock();
+
+
+    // upgradeCatalog251.updateConfigurationProperties is expected to execute only once, since no changes
+    // are needed when the relevant data has already been updated
+    upgradeCatalog251.updateConfigurationProperties("kafka-broker", expectedUpdates, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog251);
+
+    // Execute the first time... upgrading to Ambari 2.5.1
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    // Test reentry... upgrading from Ambari 2.5.1
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    easyMockSupport.verifyAll();
+  }
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
new file mode 100644
index 0000000..b71b335
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.newCapture;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import javax.persistence.EntityManager;
+
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.Capture;
+import org.easymock.EasyMockRunner;
+import org.easymock.Mock;
+import org.easymock.MockType;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import com.google.gson.Gson;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.Provider;
+
+/**
+ * {@link org.apache.ambari.server.upgrade.UpgradeCatalog252} unit tests.
+ */
+@RunWith(EasyMockRunner.class)
+public class UpgradeCatalog252Test {
+
+  @Mock(type = MockType.STRICT)
+  private Provider<EntityManager> entityManagerProvider;
+
+  @Mock(type = MockType.NICE)
+  private EntityManager entityManager;
+
+  @Mock(type = MockType.NICE)
+  private DBAccessor dbAccessor;
+
+  @Mock(type = MockType.NICE)
+  private Configuration configuration;
+
+  @Mock(type = MockType.NICE)
+  private Connection connection;
+
+  @Mock(type = MockType.NICE)
+  private Statement statement;
+
+  @Mock(type = MockType.NICE)
+  private ResultSet resultSet;
+
+  @Mock(type = MockType.NICE)
+  private OsFamily osFamily;
+
+  @Mock(type = MockType.NICE)
+  private KerberosHelper kerberosHelper;
+
+  @Mock(type = MockType.NICE)
+  private ActionManager actionManager;
+
+  @Mock(type = MockType.NICE)
+  private Config config;
+
+  @Mock(type = MockType.STRICT)
+  private Service service;
+
+  @Mock(type = MockType.NICE)
+  private Clusters clusters;
+
+  @Mock(type = MockType.NICE)
+  private Cluster cluster;
+
+  @Mock(type = MockType.NICE)
+  private Injector injector;
+
+  @Before
+  public void init() {
+    reset(entityManagerProvider, injector);
+
+    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
+
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper).anyTimes();
+
+    replay(entityManagerProvider, injector);
+  }
+
+  @After
+  public void tearDown() {
+  }
+
+  @Test
+  public void testExecuteDDLUpdates() throws Exception {
+    Capture<DBColumnInfo> hrcBackgroundColumnCapture = newCapture();
+    dbAccessor.addColumn(eq(UpgradeCatalog252.CLUSTERCONFIG_TABLE), capture(hrcBackgroundColumnCapture));
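+    // capture the column added to the clusterconfig table so its name, default and type can be asserted below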
+
+    expect(dbAccessor.getConnection()).andReturn(connection).anyTimes();
+    expect(connection.createStatement()).andReturn(statement).anyTimes();
+    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet).anyTimes();
+    expect(configuration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
+
+    replay(dbAccessor, configuration, connection, statement, resultSet);
+
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(osFamily);
+        binder.bind(EntityManager.class).toInstance(entityManager);
+        binder.bind(Configuration.class).toInstance(configuration);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog252 upgradeCatalog252 = injector.getInstance(UpgradeCatalog252.class);
+    upgradeCatalog252.executeDDLUpdates();
+
+    verify(dbAccessor);
+
+    DBColumnInfo captured = serviceDeletedColumnCapture.getValue();
+    Assert.assertEquals(UpgradeCatalog252.SERVICE_DELETED_COLUMN, captured.getName());
+    Assert.assertEquals(0, captured.getDefaultValue());
+    Assert.assertEquals(Short.class, captured.getType());
+  }
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index a44c2b3..c949ca2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -17,32 +17,50 @@
  */
 package org.apache.ambari.server.upgrade;
 
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Method;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
 
 import javax.persistence.Cache;
 import javax.persistence.EntityManager;
 import javax.persistence.EntityManagerFactory;
 import javax.persistence.EntityTransaction;
 
+import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
+import org.easymock.CaptureType;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRunner;
+import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
 import org.easymock.MockType;
 import org.junit.After;
@@ -51,6 +69,8 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
 import com.google.gson.Gson;
 import com.google.inject.Binder;
 import com.google.inject.Guice;
@@ -79,6 +99,18 @@
   @Mock(type = MockType.NICE)
   private Configuration configuration;
 
+  @Mock(type = MockType.NICE)
+  private Config config;
+
+  @Mock(type = MockType.NICE)
+  private ActionManager actionManager;
+
+  @Mock(type = MockType.NICE)
+  private Clusters clusters;
+
+  @Mock(type = MockType.NICE)
+  private Cluster cluster;
+
   @Before
   public void init() {
     reset(entityManagerProvider, injector);
@@ -100,11 +132,13 @@
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method showHcatDeletedUserMessage = UpgradeCatalog300.class.getDeclaredMethod("showHcatDeletedUserMessage");
     Method setStatusOfStagesAndRequests = UpgradeCatalog300.class.getDeclaredMethod("setStatusOfStagesAndRequests");
+    Method updateLogSearchConfigs = UpgradeCatalog300.class.getDeclaredMethod("updateLogSearchConfigs");
 
    UpgradeCatalog300 upgradeCatalog300 = createMockBuilder(UpgradeCatalog300.class)
             .addMockedMethod(showHcatDeletedUserMessage)
             .addMockedMethod(addNewConfigurationsFromXml)
             .addMockedMethod(setStatusOfStagesAndRequests)
+            .addMockedMethod(updateLogSearchConfigs)
             .createMock();
 
 
@@ -112,6 +146,8 @@
     upgradeCatalog300.showHcatDeletedUserMessage();
     upgradeCatalog300.setStatusOfStagesAndRequests();
 
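+    // the new LogSearch config migration step must run exactly once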
+    upgradeCatalog300.updateLogSearchConfigs();
+    expectLastCall().once();
 
     replay(upgradeCatalog300);
 
@@ -134,8 +170,15 @@
 
     Capture<DBAccessor.DBColumnInfo> clusterConfigSelectedColumn = newCapture();
     Capture<DBAccessor.DBColumnInfo> clusterConfigSelectedTimestampColumn = newCapture();
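+    // host_role_command gains an ops display name column in this upgrade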
+    Capture<DBAccessor.DBColumnInfo> hrcOpsDisplayNameColumn = newCapture();
+
     dbAccessor.addColumn(eq(UpgradeCatalog300.CLUSTER_CONFIG_TABLE), capture(clusterConfigSelectedColumn));
     dbAccessor.addColumn(eq(UpgradeCatalog300.CLUSTER_CONFIG_TABLE), capture(clusterConfigSelectedTimestampColumn));
+    dbAccessor.addColumn(eq(UpgradeCatalog300.HOST_ROLE_COMMAND_TABLE), capture(hrcOpsDisplayNameColumn));
+
+    // component table
+    Capture<DBAccessor.DBColumnInfo> componentStateColumn = newCapture();
+    dbAccessor.addColumn(eq(UpgradeCatalog250.COMPONENT_TABLE), capture(componentStateColumn));
 
     replay(dbAccessor, configuration);
 
@@ -153,6 +196,17 @@
     Assert.assertEquals(UpgradeCatalog300.CLUSTER_CONFIG_SELECTED_TIMESTAMP_COLUMN, capturedSelectedTimestampColumn.getName());
     Assert.assertEquals(Long.class, capturedSelectedTimestampColumn.getType());
 
+    // component table
+    DBAccessor.DBColumnInfo capturedStateColumn = componentStateColumn.getValue();
+    Assert.assertNotNull(capturedStateColumn);
+    Assert.assertEquals("repo_state", capturedStateColumn.getName());
+    Assert.assertEquals(String.class, capturedStateColumn.getType());
+
+    DBAccessor.DBColumnInfo capturedOpsDisplayNameColumn = hrcOpsDisplayNameColumn.getValue();
+    Assert.assertEquals(UpgradeCatalog300.HRC_OPS_DISPLAY_NAME_COLUMN, capturedOpsDisplayNameColumn.getName());
+    Assert.assertNull(capturedOpsDisplayNameColumn.getDefaultValue());
+    Assert.assertEquals(String.class, capturedOpsDisplayNameColumn.getType());
+
     verify(dbAccessor);
   }
 
@@ -204,4 +258,89 @@
 
     verify(dbAccessor, entityManager, emFactory, emCache);
   }
+
+  @Test
+  public void testLogSearchUpdateConfigs() throws Exception {
+    reset(clusters, cluster);
+    expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+        .addMockedMethod("createConfiguration")
+        .addMockedMethod("getClusters", new Class[]{})
+        .addMockedMethod("createConfig")
+        .withConstructor(actionManager, clusters, injector)
+        .createNiceMock();
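+    // partial mock: only the config-related controller methods are stubbed; everything else behaves as a nice mock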
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+
+    Config confSomethingElse1 = easyMockSupport.createNiceMock(Config.class);
+    expect(confSomethingElse1.getType()).andReturn("something-else-1");
+    Config confSomethingElse2 = easyMockSupport.createNiceMock(Config.class);
+    expect(confSomethingElse2.getType()).andReturn("something-else-2");
+    Config confLogSearchConf1 = easyMockSupport.createNiceMock(Config.class);
+    expect(confLogSearchConf1.getType()).andReturn("service-1-logsearch-conf");
+    Config confLogSearchConf2 = easyMockSupport.createNiceMock(Config.class);
+    expect(confLogSearchConf2.getType()).andReturn("service-2-logsearch-conf");
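+    // only the two *-logsearch-conf types above should be picked up for migration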
+
+    Map<String, String> oldLogSearchConf = ImmutableMap.of(
+        "service_name", "Service",
+        "component_mappings", "Component Mappings",
+        "content", "Content");
+
+    Collection<Config> configs = Arrays.asList(confSomethingElse1, confLogSearchConf1, confSomethingElse2, confLogSearchConf2);
+
+    expect(cluster.getAllConfigs()).andReturn(configs).atLeastOnce();
+    expect(cluster.getDesiredConfigByType("service-1-logsearch-conf")).andReturn(confLogSearchConf1).once();
+    expect(cluster.getDesiredConfigByType("service-2-logsearch-conf")).andReturn(confLogSearchConf2).once();
+    expect(confLogSearchConf1.getProperties()).andReturn(oldLogSearchConf).once();
+    expect(confLogSearchConf2.getProperties()).andReturn(oldLogSearchConf).once();
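+    // capture every createConfig call so both migrated logsearch-conf payloads can be inspected below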
+    Capture<Map<String, String>> logSearchConfCapture = EasyMock.newCapture(CaptureType.ALL);
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(logSearchConfCapture), anyString(),
+        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).times(2);
+
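+    // the logsearch.logfeeder.include.default.level property should reappear in logfeeder-properties without its prefix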
+    Map<String, String> oldLogSearchProperties = ImmutableMap.of(
+        "logsearch.logfeeder.include.default.level", "FATAL,ERROR,WARN"
+    );
+
+    Map<String, String> expectedLogFeederProperties = ImmutableMap.of(
+        "logfeeder.include.default.level", "FATAL,ERROR,WARN"
+    );
+
+    Config logFeederPropertiesConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logfeeder-properties")).andReturn(logFeederPropertiesConf).times(2);
+    expect(logFeederPropertiesConf.getProperties()).andReturn(Collections.<String, String> emptyMap()).once();
+    Capture<Map<String, String>> logFeederPropertiesCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), eq("logfeeder-properties"), capture(logFeederPropertiesCapture),
+        anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+    Config logSearchPropertiesConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logsearch-properties")).andReturn(logSearchPropertiesConf).times(2);
+    expect(logSearchPropertiesConf.getProperties()).andReturn(oldLogSearchProperties).times(2);
+    Capture<Map<String, String>> logSearchPropertiesCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), eq("logsearch-properties"), capture(logSearchPropertiesCapture),
+        anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+    replay(clusters, cluster);
+    replay(controller, injector2);
+    replay(confSomethingElse1, confSomethingElse2, confLogSearchConf1, confLogSearchConf2);
+    replay(logSearchPropertiesConf, logFeederPropertiesConf);
+    new UpgradeCatalog300(injector2).updateLogSearchConfigs();
+    easyMockSupport.verifyAll();
+
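+    // both migrated *-logsearch-conf payloads are expected to be empty after the upgrade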
+    List<Map<String, String>> updatedLogSearchConfs = logSearchConfCapture.getValues();
+    assertEquals(2, updatedLogSearchConfs.size());
+    for (Map<String, String> updatedLogSearchConf : updatedLogSearchConfs) {
+      assertTrue(Maps.difference(Collections.<String, String> emptyMap(), updatedLogSearchConf).areEqual());
+    }
+
+    Map<String,String> newLogFeederProperties = logFeederPropertiesCapture.getValue();
+    assertTrue(Maps.difference(expectedLogFeederProperties, newLogFeederProperties).areEqual());
+
+    Map<String,String> newLogSearchProperties = logSearchPropertiesCapture.getValue();
+    assertTrue(Maps.difference(Collections.<String, String> emptyMap(), newLogSearchProperties).areEqual());
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index e9bd27c..5b39086 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -194,7 +194,7 @@
       JsonMappingException, JAXBException, IOException {
     StageUtils stageUtils = new StageUtils(injector.getInstance(StageFactory.class));
     Stage s = StageUtils.getATestStage(1, 2, "host1", "clusterHostInfo", "hostParamsStage");
-    ExecutionCommand cmd = s.getExecutionCommands(getHostName()).get(0).getExecutionCommand();
+    ExecutionCommand cmd = s.getExecutionCommands("host1").get(0).getExecutionCommand();
     HashMap<String, Map<String, String>> configTags = new HashMap<>();
     Map<String, String> globalTag = new HashMap<>();
     globalTag.put("tag", "version1");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/view/ViewInstanceOperationHandlerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/view/ViewInstanceOperationHandlerTest.java
new file mode 100644
index 0000000..147d7f7
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/view/ViewInstanceOperationHandlerTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.view;
+
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.ambari.server.orm.dao.PrivilegeDAO;
+import org.apache.ambari.server.orm.dao.ViewDAO;
+import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
+import org.apache.ambari.server.orm.entities.PrincipalEntity;
+import org.apache.ambari.server.orm.entities.PrivilegeEntity;
+import org.apache.ambari.server.orm.entities.ResourceEntity;
+import org.apache.ambari.server.orm.entities.ViewEntity;
+import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
+import org.easymock.EasyMock;
+import org.junit.Test;
+
+public class ViewInstanceOperationHandlerTest {
+  @Test
+  public void uninstallViewInstance() throws Exception {
+    ViewInstanceOperationHandler viewInstanceOperationHandler = getViewInstanceOperationHandler();
+
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setId(3L);
+    ViewInstanceEntity instanceEntity = new ViewInstanceEntity();
+    instanceEntity.setName("VIEW_INSTANCE_NAME");
+    instanceEntity.setViewName("VIEW_NAME");
+    instanceEntity.setResource(resourceEntity);
+
+    ViewDAO viewDAO = viewInstanceOperationHandler.viewDAO;
+    ViewEntity viewEntity = EasyMock.createNiceMock(ViewEntity.class);
+    expect(viewDAO.findByName(instanceEntity.getViewName())).andReturn(viewEntity);
+    expect(viewEntity.getCommonName()).andReturn("view-common-name");
+    expect(viewEntity.getVersion()).andReturn("0.0.1");
+
+    ViewInstanceDAO viewInstanceDAO = viewInstanceOperationHandler.instanceDAO;
+    ViewInstanceEntity viewInstanceEntity = createNiceMock(ViewInstanceEntity.class);
+    expect(viewInstanceDAO.findByName(instanceEntity.getViewName(), instanceEntity.getName())).andReturn(viewInstanceEntity);
+    expect(viewInstanceEntity.isXmlDriven()).andReturn(false);
+    expect(viewInstanceEntity.isXmlDriven()).andReturn(false);
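+    // uninstall consults isXmlDriven twice; both calls report a non-XML-driven instance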
+
+    PrivilegeEntity privilege1 = createNiceMock(PrivilegeEntity.class);
+    PrivilegeEntity privilege2 = createNiceMock(PrivilegeEntity.class);
+    List<PrivilegeEntity> privileges = Arrays.asList(privilege1, privilege2);
+
+    PrivilegeDAO privilegeDAO = viewInstanceOperationHandler.privilegeDAO;
+
+    PrincipalEntity principalEntity = createNiceMock(PrincipalEntity.class);
+
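+    // each privilege is detached from its principal and then removed through the DAO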
+    expect(privilege1.getPrincipal()).andReturn(principalEntity);
+    expect(privilege2.getPrincipal()).andReturn(principalEntity);
+
+    principalEntity.removePrivilege(privilege1);
+    principalEntity.removePrivilege(privilege2);
+
+    expect(privilegeDAO.findByResourceId(3L)).andReturn(privileges);
+
+    privilegeDAO.remove(privilege1);
+    privilegeDAO.remove(privilege2);
+
+    viewInstanceDAO.remove(viewInstanceEntity);
+
+    replay(privilegeDAO, viewDAO, viewInstanceDAO, principalEntity, privilege1, privilege2,viewInstanceEntity, viewEntity);
+
+    viewInstanceOperationHandler.uninstallViewInstance(instanceEntity);
+
+    verify(privilegeDAO, viewDAO, viewInstanceDAO);
+  }
+
+  private ViewInstanceOperationHandler getViewInstanceOperationHandler() {
+    ViewDAO viewDAO = EasyMock.createNiceMock(ViewDAO.class);
+    ViewInstanceDAO instanceDAO = EasyMock.createNiceMock(ViewInstanceDAO.class);
+    PrivilegeDAO privilegeDAO = EasyMock.createNiceMock(PrivilegeDAO.class);
+
+    ViewInstanceOperationHandler instance = new ViewInstanceOperationHandler();
+    instance.viewDAO = viewDAO;
+    instance.instanceDAO = instanceDAO;
+    instance.privilegeDAO = privilegeDAO;
+
+    return instance;
+  }
+
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java b/ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java
index daabcb3..eda232b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/view/ViewRegistryTest.java
@@ -37,7 +37,6 @@
 import java.net.MalformedURLException;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -212,6 +211,7 @@
   // registry mocks
   private static final ViewDAO viewDAO = createMock(ViewDAO.class);
   private static final ViewInstanceDAO viewInstanceDAO = createNiceMock(ViewInstanceDAO.class);
+  private static final ViewInstanceOperationHandler viewInstanceOperationHandler = createNiceMock(ViewInstanceOperationHandler.class);
   private static final UserDAO userDAO = createNiceMock(UserDAO.class);
   private static final MemberDAO memberDAO = createNiceMock(MemberDAO.class);
   private static final PrivilegeDAO privilegeDAO = createNiceMock(PrivilegeDAO.class);
@@ -227,10 +227,10 @@
 
   @Before
   public void resetGlobalMocks() {
-    ViewRegistry.initInstance(getRegistry(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO,
+    ViewRegistry.initInstance(getRegistry(viewInstanceOperationHandler, viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO,
         permissionDAO, resourceDAO, resourceTypeDAO, securityHelper, handlerList, null, null, ambariMetaInfo, clusters));
 
-    reset(viewDAO, resourceDAO, viewInstanceDAO, userDAO, memberDAO,
+    reset(viewInstanceOperationHandler, viewDAO, resourceDAO, viewInstanceDAO, userDAO, memberDAO,
         privilegeDAO, resourceTypeDAO, securityHelper, configuration, handlerList, ambariMetaInfo,
         clusters);
   }
@@ -448,7 +448,7 @@
     TestViewArchiveUtility archiveUtility =
         new TestViewArchiveUtility(viewConfigs, files, outputStreams, jarFiles, badArchive);
 
-    ViewRegistry registry = getRegistry(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, permissionDAO,
+    ViewRegistry registry = getRegistry(viewInstanceOperationHandler, viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, permissionDAO,
         resourceDAO, resourceTypeDAO, securityHelper, handlerList, null, archiveUtility, ambariMetaInfo, clusters);
 
     registry.readViewArchives();
@@ -1122,29 +1122,10 @@
     ViewConfig config = ViewConfigTest.getConfig(XML_VALID_INSTANCE);
     ViewEntity viewEntity = getViewEntity(config, ambariConfig, getClass().getClassLoader(), "");
     ViewInstanceEntity viewInstanceEntity = getViewInstanceEntity(viewEntity, config.getInstances().get(0));
-    ResourceEntity resource = new ResourceEntity();
-    resource.setId(3L);
-    viewInstanceEntity.setResource(resource);
-    PrivilegeEntity privilege1 = createNiceMock(PrivilegeEntity.class);
-    PrivilegeEntity privilege2 = createNiceMock(PrivilegeEntity.class);
-    List<PrivilegeEntity> privileges = Arrays.asList(privilege1, privilege2);
 
-    PrincipalEntity principalEntity = createNiceMock(PrincipalEntity.class);
-
-    expect(privilege1.getPrincipal()).andReturn(principalEntity);
-    expect(privilege2.getPrincipal()).andReturn(principalEntity);
-
-    principalEntity.removePrivilege(privilege1);
-    principalEntity.removePrivilege(privilege2);
-
-    expect(privilegeDAO.findByResourceId(3L)).andReturn(privileges);
-    privilegeDAO.remove(privilege1);
-    privilegeDAO.remove(privilege2);
-    viewInstanceDAO.remove(viewInstanceEntity);
-
+    viewInstanceOperationHandler.uninstallViewInstance(viewInstanceEntity);
     handlerList.removeViewInstance(viewInstanceEntity);
-
-    replay(viewInstanceDAO, privilegeDAO, handlerList, privilege1, privilege2, principalEntity);
+    replay(viewInstanceOperationHandler, handlerList);
 
     registry.addDefinition(viewEntity);
     registry.addInstanceDefinition(viewEntity, viewInstanceEntity);
@@ -1154,7 +1135,7 @@
 
     Assert.assertEquals(0, viewInstanceDefinitions.size());
 
-    verify(viewInstanceDAO, privilegeDAO, handlerList, privilege1, privilege2, principalEntity);
+    verify(viewInstanceOperationHandler, handlerList);
   }
 
   @Test
@@ -1809,12 +1790,12 @@
                                          ViewExtractor viewExtractor,
                                          ViewArchiveUtility archiveUtility,
                                          AmbariMetaInfo ambariMetaInfo) {
-    return getRegistry(viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, permissionDAO,
+    return getRegistry(null, viewDAO, viewInstanceDAO, userDAO, memberDAO, privilegeDAO, permissionDAO,
         resourceDAO, resourceTypeDAO, securityHelper, handlerList, viewExtractor, archiveUtility,
         ambariMetaInfo, null);
   }
 
-  public static ViewRegistry getRegistry(ViewDAO viewDAO, ViewInstanceDAO viewInstanceDAO,
+  public static ViewRegistry getRegistry(ViewInstanceOperationHandler viewInstanceOperationHandler, ViewDAO viewDAO, ViewInstanceDAO viewInstanceDAO,
                                          UserDAO userDAO, MemberDAO memberDAO,
                                          PrivilegeDAO privilegeDAO, PermissionDAO permissionDAO,
                                          ResourceDAO resourceDAO, ResourceTypeDAO resourceTypeDAO,
@@ -1833,6 +1814,7 @@
 
     ViewRegistry instance = new ViewRegistry(publisher);
 
+    instance.viewInstanceOperationHandler = viewInstanceOperationHandler;
     instance.viewDAO = viewDAO;
     instance.resourceDAO = resourceDAO;
     instance.instanceDAO = viewInstanceDAO;
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 9579c22..66b5ac5 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -85,7 +85,7 @@
                   print_info_msg, print_warning_msg, print_error_msg
                 from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions, remove_file, copy_file, \
                   is_valid_filepath
-                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers
+                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, DBMSConfig
                 from ambari_server.dbConfiguration_linux import PGConfig, LinuxDBMSConfig, OracleConfig
                 from ambari_server.properties import Properties
                 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
@@ -302,30 +302,27 @@
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_security")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup_security(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock, OptionParserMock,
-                                    setup_security_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["setup-security"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.security_option = "setup-security"
-    options.sid_or_sname = "sid"
-    setup_security_method.return_value = None
+                                    logger_mock, setup_security_method):
+    import sys
+    tmp_argv = sys.argv
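+    # drive mainBody() through real argument parsing instead of mocking OptionParser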
+    try:
+      sys.argv = ['ambari-server', 'setup-security', '--security-option=setup-security']
+      setup_security_method.return_value = None
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    _ambari_server_.mainBody()
-    self.assertTrue(setup_security_method.called)
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      _ambari_server_.mainBody()
+      self.assertTrue(setup_security_method.called)
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+    finally:
+      sys.argv = tmp_argv
+    pass
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_ambari_krb5_jaas")
@@ -404,148 +401,147 @@
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, OptionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method, exit_mock):
-    opm = OptionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
-
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    _ambari_server_.mainBody()
-
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup", "-v"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    setup_method.side_effect = Exception("Unexpected error")
+    import sys
+    tmp_argv = sys.argv
     try:
+      sys.argv = ["ambari-server", "setup"]
+
       _ambari_server_.mainBody()
-    except Exception:
-      self.assertTrue(True)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertTrue(get_verbose())
 
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    options.verbose = False
-    setup_method.side_effect = Exception("Unexpected error")
-    _ambari_server_.mainBody()
-    self.assertTrue(exit_mock.called)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertFalse(get_verbose())
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    pass
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup", "-v"]
+      setup_method.side_effect = Exception("Unexpected error")
+      try:
+        _ambari_server_.mainBody()
+      except Exception:
+        self.assertTrue(True)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertTrue(get_verbose())
+
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup"]
+      setup_method.side_effect = Exception("Unexpected error")
+      _ambari_server_.mainBody()
+      self.assertTrue(exit_mock.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertFalse(get_verbose())
+
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(_ambari_server_, "setup")
-  @patch("optparse.OptionParser")
+  @patch.object(PGConfig, "_setup_local_server")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_main_with_preset_dbms(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                 logger_mock, optionParserMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+  @patch("ambari_server.serverSetup.check_ambari_user")
+  @patch('ambari_server.serverSetup.download_and_install_jdk')
+  @patch("ambari_server.serverSetup.configure_os_settings")
+  @patch.object(DBMSConfig, "setup_database")
+  @patch("ambari_server.serverSetup.check_jdbc_drivers")
+  @patch("ambari_server.serverSetup.extract_views")
+  @patch("ambari_server.serverSetup.adjust_directory_permissions")
+  @patch("ambari_server.serverSetup.service_setup")
+  def test_main_with_preset_dbms(self, service_setup_mock, adjust_directory_permissions_mock, extract_views_mock,
+                                 check_jdbc_drivers_mock, setup_database_mock, configure_os_settings_mock,
+                                 download_and_install_jdk_mock, check_ambari_user_mock, init_logging_mock,
+                                 setup_logging_mock, get_ambari_properties_mock, logger_mock, setup_local_db_method):
+    extract_views_mock.return_value = 0
+    check_ambari_user_mock.return_value = (0, False, 'user', None)
+    configure_os_settings_mock.return_value = 0
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup", "-s"]
 
-    options.dbms = "sqlanywhere"
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertEquals(options.database_index, 5)
-    pass
+      self.assertTrue(setup_local_db_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "fix_database_options")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock, optionParserMock,
+  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
                                        fixDBOptionsMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', 'setup']
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertTrue(fixDBOptionsMock.called)
-    set_silent(False)
-    pass
+      self.assertTrue(setup_method.called)
+      self.assertTrue(fixDBOptionsMock.called)
+      set_silent(False)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_start(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                           optionParserMock, reset_method, stop_method,
+                           reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "setup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -656,33 +652,32 @@
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_backup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                            optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                            restore_mock, backup_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["backup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "backup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(backup_mock.called)
-    self.assertFalse(restore_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(backup_mock.called)
+      self.assertFalse(restore_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   #Restore is not yet supported on Windows
   @not_for_platform(PLATFORM_WINDOWS)
@@ -693,33 +688,31 @@
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_restore(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                             optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                             restore_mock, backup_mock, reset_method, stop_method,
                             start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["restore"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "restore"]
+      _ambari_server_.mainBody()
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      self.assertTrue(restore_mock.called)
+      self.assertFalse(backup_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertTrue(restore_mock.called)
-    self.assertFalse(backup_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -791,32 +784,30 @@
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_reset(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, optionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "reset"]
 
-    options = self._create_empty_options_mock()
-    args = ["reset"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
+      _ambari_server_.mainBody()
 
-    _ambari_server_.mainBody()
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertTrue(reset_method.called)
 
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertTrue(reset_method.called)
-
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
 
   @not_for_platform(PLATFORM_WINDOWS)
@@ -8497,64 +8488,84 @@
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock,  optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                    logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
-
-    is_server_runing_method.return_value = (True, 100)
-
-    options.dbms = None
-    options.sid_or_sname = "sid"
-
+    import sys
+    tmp_argv = sys.argv
     try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 0)
+      sys.argv = ['ambari-server', "status"]
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      is_server_runing_method.return_value = (True, 100)
+
+
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 0)
+
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_not_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                        logger_mock, optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                        logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
-
-    is_server_runing_method.return_value = (False, None)
-
-    options.dbms = None
-    options.sid_or_sname = "sid"
-
+    import sys
+    tmp_argv = sys.argv
     try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 3)
+      sys.argv = ['ambari-server', "status"]
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      is_server_runing_method.return_value = (False, None)
 
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 3)
+
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
+
+  @not_for_platform(PLATFORM_WINDOWS)
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
+  @patch.object(_ambari_server_, "logger")
+  @patch("ambari_server.serverConfiguration.get_ambari_properties")
+  @patch.object(_ambari_server_, "setup_logging")
+  @patch.object(_ambari_server_, "init_logging")
+  def test_status_extra_option(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
+                                        logger_mock):
+
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', "status", '--skip-database-check']
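+      # an unrecognized extra option should make the parser exit with code 2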
+      flag = False
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertEquals(e.code, 2)
+        flag = True
+
+      self.assertTrue(flag)
+
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   def test_web_server_startup_timeout(self):
     from ambari_server.serverConfiguration import get_web_server_startup_timeout
diff --git a/ambari-server/src/test/python/TestMpacks.py b/ambari-server/src/test/python/TestMpacks.py
index 32db005..f9ae39c 100644
--- a/ambari-server/src/test/python/TestMpacks.py
+++ b/ambari-server/src/test/python/TestMpacks.py
@@ -269,7 +269,8 @@
   @patch("ambari_server.setupMpacks.download_mpack")
   @patch("ambari_server.setupMpacks.run_os_command")
   @patch("ambari_server.setupMpacks.validate_purge")
-  def test_install_stack_mpack(self, validate_purge_mock, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
+  @patch("ambari_server.setupMpacks.set_file_permissions")
+  def test_install_stack_mpack(self, set_file_permissions_mock, validate_purge_mock, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                      add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
                                      create_symlink_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
@@ -415,7 +416,9 @@
   @patch("ambari_server.setupMpacks.add_replay_log")
   @patch("ambari_server.setupMpacks.expand_mpack")
   @patch("ambari_server.setupMpacks.download_mpack")
-  def test_install_extension_mpack(self, download_mpack_mock, expand_mpack_mock, add_replay_log_mock,
+  @patch("ambari_server.setupMpacks.set_file_permissions")
+
+  def test_install_extension_mpack(self, set_file_permissions_mock, download_mpack_mock, expand_mpack_mock, add_replay_log_mock,
       purge_stacks_and_mpacks_mock, get_ambari_properties_mock, get_ambari_version_mock,
       create_symlink_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
@@ -490,10 +493,11 @@
   @patch("ambari_server.setupMpacks.purge_stacks_and_mpacks")
   @patch("ambari_server.setupMpacks.expand_mpack")
   @patch("ambari_server.setupMpacks.download_mpack")
-  def test_install_addon_service_mpack(self, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
+  @patch("ambari_server.setupMpacks.set_file_permissions")
+  def test_install_addon_service_mpack(self, set_file_permissions_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                        add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
                                        create_symlink_mock, os_mkdir_mock, shutil_move_mock,os_symlink_mock,
-                                       os_path_isdir_mock, os_path_exists_mock):
+                                       os_path_isdir_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
     options.mpack_path = "/path/to/myservice.tar.gz"
     options.purge = False
@@ -580,7 +584,9 @@
   @patch("ambari_server.setupMpacks.expand_mpack")
   @patch("ambari_server.setupMpacks.download_mpack")
   @patch("ambari_server.setupMpacks.run_os_command")
-  def test_upgrade_stack_mpack(self, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
+  @patch("ambari_server.setupMpacks.set_file_permissions")
+
+  def test_upgrade_stack_mpack(self, set_file_permissions_mock, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                _uninstall_mpack_mock, add_replay_log_mock, get_ambari_properties_mock,
                                get_ambari_version_mock, create_symlink_mock, os_mkdir_mock, shutil_move_mock,
                                os_path_exists_mock, create_symlink_using_path_mock):
@@ -590,6 +596,7 @@
     expand_mpack_mock.side_effect = ["mpacks/mystack-ambari-mpack-1.0.0.1", "mpacks/mystack-ambari-mpack-1.0.0.1"]
     get_ambari_version_mock.return_value = "2.4.0.0"
     run_os_command_mock.return_value = (0, "", "")
+
     mpacks_directory = configs[serverConfiguration.MPACKS_STAGING_PATH_PROPERTY]
     os_path_exists_mock.side_effect = [True, True, True, True, True, True, True, True, True, True,
                                        True, True, True, True, True, True, True, False, False, True,
diff --git a/ambari-server/src/test/python/TestResourceFilesKeeper.py b/ambari-server/src/test/python/TestResourceFilesKeeper.py
index 4f7dc52..4f8bdd5 100644
--- a/ambari-server/src/test/python/TestResourceFilesKeeper.py
+++ b/ambari-server/src/test/python/TestResourceFilesKeeper.py
@@ -179,6 +179,7 @@
     except Exception, e:
       self.fail('Unexpected exception thrown:' + str(e))
 
+  @patch("os.path.isfile")
   @patch("os.path.exists")
   @patch("os.listdir")
   @patch.object(ResourceFilesKeeper, "count_hash_sum")
@@ -188,8 +189,10 @@
   def test_update_directory_archive(self, write_hash_sum_mock,
                                     zip_directory_mock, read_hash_sum_mock,
                                     count_hash_sum_mock,
-                                    os_listdir_mock, os_path_exists_mock):
+                                    os_listdir_mock, os_path_exists_mock,
+                                    os_path_isfile_mock):
     os_listdir_mock.return_value = ['file1', 'dir1']
+
     # Test situation when there is no saved directory hash
     read_hash_sum_mock.return_value = None
     count_hash_sum_mock.return_value = self.YA_HASH
@@ -206,10 +209,27 @@
     zip_directory_mock.reset_mock()
     write_hash_sum_mock.reset_mock()
 
-    # Test situation when saved directory hash == current hash
+    # Test situation where there is a .hash file, equal to the current hash,
+    # but no archive.zip file
+    count_hash_sum_mock.return_value = self.YA_HASH
+    read_hash_sum_mock.return_value = self.YA_HASH
+    os_path_isfile_mock.return_value = False
+    resource_files_keeper = ResourceFilesKeeper(self.TEST_RESOURCES_DIR, self.SOME_PATH)
+    resource_files_keeper.update_directory_archive(self.SOME_PATH)
+    self.assertTrue(read_hash_sum_mock.called)
+    self.assertTrue(count_hash_sum_mock.called)
+    self.assertTrue(zip_directory_mock.called)
+    self.assertFalse(write_hash_sum_mock.called)
+
+    read_hash_sum_mock.reset_mock()
+    count_hash_sum_mock.reset_mock()
+    zip_directory_mock.reset_mock()
+    write_hash_sum_mock.reset_mock()
+
+    # Test situation when saved directory hash == current hash and old archive does not exist
     read_hash_sum_mock.return_value = self.DUMMY_HASH
     count_hash_sum_mock.return_value = self.YA_HASH
-    os_path_exists_mock.return_value = True
+    os_path_isfile_mock.return_value = False
     resource_files_keeper.update_directory_archive(self.SOME_PATH)
     self.assertTrue(read_hash_sum_mock.called)
     self.assertTrue(count_hash_sum_mock.called)
@@ -221,10 +241,11 @@
     zip_directory_mock.reset_mock()
     write_hash_sum_mock.reset_mock()
 
-    # Test situation when saved directory hash == current hash
+    # Test situation when saved directory hash == current hash and old archive exists
     read_hash_sum_mock.return_value = self.DUMMY_HASH
     count_hash_sum_mock.return_value = self.DUMMY_HASH
     os_path_exists_mock.return_value = True
+    os_path_isfile_mock.return_value = True
     resource_files_keeper.update_directory_archive(self.SOME_PATH)
     self.assertTrue(read_hash_sum_mock.called)
     self.assertTrue(count_hash_sum_mock.called)
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 42bc989..fda63e0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -712,108 +712,6 @@
 
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'hbase-site': {
-        'hbase.master.kerberos.principal': '/path/to/hbase_keytab',
-        'hbase.master.keytab.file': 'hbase_principal'
-      }
-    }
-
-    result_issues = []
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                           "hbase.security.authorization": "true"}
-    props_empty_check = ["hbase.master.keytab.file",
-                           "hbase.master.kerberos.principal"]
-
-    props_read_check = ["hbase.master.keytab.file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
-                                           security_params['hbase-site']['hbase.master.keytab.file'],
-                                           security_params['hbase-site']['hbase.master.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-     # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hbase-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hbase-site']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="default.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_upgrade_backup(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py",
                    classname = "HbaseMasterUpgrade",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 9bb0dd7..93f5d19 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -530,110 +530,6 @@
 
     self.assertNoMoreResources()
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'hbase-site': {
-        'hbase.regionserver.keytab.file': '/path/to/hbase_keytab',
-        'hbase.regionserver.kerberos.principal': 'hbase_principal'
-      }
-    }
-
-    result_issues = []
-    props_value_check = {"hbase.security.authentication": "kerberos",
-                           "hbase.security.authorization": "true"}
-    props_empty_check = ["hbase.regionserver.keytab.file",
-                           "hbase.regionserver.kerberos.principal"]
-
-    props_read_check = ["hbase.regionserver.keytab.file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
-                                           security_params['hbase-site']['hbase.regionserver.keytab.file'],
-                                           security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-     # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hbase-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hbase-site' : "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="secured.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "security_status",
-                   config_file="default.json",
-                   stack_version = self.STACK_VERSION,
-                   target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
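
The rewritten DataNode stop tests in `test_datanode.py` below simulate a DataNode that has already deregistered: the mocked `shell.checked_call` raises `Fail` only for the `-getDatanodeInfo` liveness probe and lets every other command succeed. A sketch of that `side_effect` pattern under standard `unittest.mock` assumptions (`Fail` stands in for `resource_management.core.exceptions.Fail`):

```python
# Sketch of the side_effect pattern used by the new test_stop_* tests.
from unittest.mock import MagicMock

class Fail(Exception):
    pass

def side_effect(arg):
    # Fail only the liveness probe; let every other command pass.
    if '-getDatanodeInfo' in arg:
        raise Fail()

checked_call = MagicMock(side_effect=side_effect)
checked_call('hdfs dfsadmin -report')  # succeeds
try:
    checked_call('hdfs dfsadmin ... -getDatanodeInfo 0.0.0.0:8010')
except Fail:
    pass  # the stop logic treats this as "datanode is down"
```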
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 1c3c5b7..5702b57 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -22,6 +22,7 @@
 from mock.mock import MagicMock, patch
 from resource_management.libraries.script.script import Script
 from resource_management.core import shell
+import itertools
 from resource_management.core.exceptions import Fail
 import resource_management.libraries.functions.mounted_dirs_helper
 
@@ -76,13 +77,21 @@
     )
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_default(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_default(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg:
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
                        config_file = "default.json",
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
@@ -221,13 +230,21 @@
     )
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg:
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
                        config_file = "secured.json",
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
@@ -237,9 +254,15 @@
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
     self.assertNoMoreResources()
 
-
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured_HDP22_root(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured_HDP22_root(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg:
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
       secured_json = json.load(f)
@@ -251,6 +274,7 @@
                        command = "stop",
                        config_dict = secured_json,
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
@@ -260,8 +284,15 @@
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
     self.assertNoMoreResources()
 
+  @patch('time.sleep')
   @patch("os.path.exists", new = MagicMock(return_value=False))
-  def test_stop_secured_HDP22_non_root_https_only(self):
+  @patch("resource_management.core.shell.checked_call")
+  def test_stop_secured_HDP22_non_root_https_only(self, checked_call_mock, time_mock):
+    def side_effect(arg):
+      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg:
+        raise Fail()
+      return
+    checked_call_mock.side_effect = side_effect
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
       secured_json = json.load(f)
@@ -276,6 +307,7 @@
                        command = "stop",
                        config_dict = secured_json,
                        stack_version = self.STACK_VERSION,
+                       checked_call_mocks = side_effect,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
@@ -564,7 +596,7 @@
 
   @patch("resource_management.core.shell.call")
   @patch('time.sleep')
-  def test_stop_during_upgrade(self, time_mock, call_mock):
+  def test_stop_during_upgrade_not_shutdown(self, time_mock, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     call_mock_side_effects = [(0, ""), ]
     call_mock.side_effects = call_mock_side_effects
@@ -573,7 +605,7 @@
 
     version = '2.2.1.0-3242'
     json_content['commandParams']['version'] = version
-
+    mocks_dict = {}
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
         classname = "DataNode",
@@ -582,19 +614,23 @@
         stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES,
         call_mocks = call_mock_side_effects,
+        checked_call_mocks=itertools.cycle([(0, "OK.")]),
+        mocks_dict = mocks_dict,
         command_args=["rolling"])
 
       raise Fail("Expected a fail since datanode didn't report a shutdown")
     except Exception, err:
-      expected_message = "DataNode has not shutdown."
+      expected_message = "DataNode has not yet deregistered from the NameNode..."
       if str(err.message) != expected_message:
         self.fail("Expected this exception to be thrown. " + expected_message + ". Got this instead, " + str(err.message))
 
-    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
+    self.assertEquals(
+      ('hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
+      mocks_dict['checked_call'].call_args_list[0][0][0])
 
   @patch("resource_management.core.shell.call")
   @patch('time.sleep')
-  def test_stop_during_upgrade(self, time_mock, call_mock):
+  def test_stop_during_upgrade_not_shutdown_ha(self, time_mock, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/ha_default.json"
     call_mock_side_effects = [(0, ""), ]
     call_mock.side_effects = call_mock_side_effects
@@ -603,7 +639,7 @@
 
     version = '2.2.1.0-3242'
     json_content['commandParams']['version'] = version
-
+    mocks_dict = {}
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                          classname = "DataNode",
@@ -612,123 +648,16 @@
                          stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = call_mock_side_effects,
+                         checked_call_mocks=itertools.cycle([(0, "OK.")]),
+                         mocks_dict = mocks_dict,
                          command_args=["rolling"])
 
       raise Fail("Expected a fail since datanode didn't report a shutdown")
     except Exception, err:
-      expected_message = "DataNode has not shutdown."
+      expected_message = "DataNode has not yet deregistered from the NameNode..."
       if str(err.message) != expected_message:
         self.fail("Expected this exception to be thrown. " + expected_message + ". Got this instead, " + str(err.message))
 
-    self.assertResourceCalled("Execute", "hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010", tries=1, user="hdfs")
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.datanode.keytab.file': 'path/to/datanode/keytab/file',
-        'dfs.datanode.kerberos.principal': 'datanode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.datanode.keytab.file',
-                         'dfs.datanode.kerberos.principal']
-    props_read_check = ['dfs.datanode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.datanode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                         classname = "DataNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {}
-    empty_security_params['core-site'] = {}
-    empty_security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hdfs-site']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
-                       classname = "DataNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
+    self.assertEquals(
+      ('hdfs dfsadmin -fs hdfs://ns1 -D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo 0.0.0.0:8010'),
+      mocks_dict['checked_call'].call_args_list[0][0][0])
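
Two mock patterns replace the old `assertResourceCalled` check in the rewritten upgrade tests above: `checked_call_mocks=itertools.cycle([(0, "OK.")])` supplies an endless stream of successful `(returncode, stdout)` results, and `mocks_dict` hands the patched mock back to the test so the exact command line can be pulled out of `call_args_list`. A sketch of both, assuming plain `unittest.mock`:

```python
# Sketch: an iterable side_effect yields one result per call
# (itertools.cycle never runs out), and call_args_list[0][0][0] is the
# first positional argument of the first recorded call.
import itertools
from unittest.mock import MagicMock

checked_call = MagicMock(side_effect=itertools.cycle([(0, "OK.")]))
checked_call('hdfs dfsadmin -fs hdfs://ns1 -getDatanodeInfo 0.0.0.0:8010')
assert checked_call('any later command') == (0, "OK.")

first_cmd = checked_call.call_args_list[0][0][0]
assert first_cmd.endswith('-getDatanodeInfo 0.0.0.0:8010')
```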
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 85098fa..bcd9c80 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -50,25 +50,25 @@
                               )
     self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                               conf_dir = '/tmp/123',
-                              mode=0600,
+                              mode=0644,
                               configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'],
                               configurations = self.getConfig()['configurations']['hdfs-site'],
                               )
     self.assertResourceCalled('File', '/tmp/123/hadoop-env.sh',
-                              mode=0600,
+                              mode=0644,
                               content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
                               )
     self.assertResourceCalled('File', '/tmp/123/log4j.properties',
-                              mode=0600,
+                              mode=0644,
                               content = InlineTemplate(self.getConfig()['configurations']['hdfs-log4j']['content']+
                                                        self.getConfig()['configurations']['yarn-log4j']['content']),
                               )
     self.assertResourceCalled('PropertiesFile', '/tmp/123/runtime.properties',
-                              mode=0600,
+                              mode=0644,
                               properties = UnknownConfigurationMock(),
     )
     self.assertResourceCalled('PropertiesFile', '/tmp/123/startup.properties',
-                              mode=0600,
+                              mode=0644,
                               properties = UnknownConfigurationMock(),
     )
     self.assertResourceCalled('Directory', '/tmp/123',
@@ -88,106 +88,6 @@
 
     # for now, it's enough that <stack-selector-tool> is confirmed
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('core-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_keytab'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_principal_name'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                         classname = "HdfsClient",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with empty hdfs_user_principal and hdfs_user_keytab
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
-                       classname = "HdfsClient",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   @patch("resource_management.core.shell.call")
   def test_pre_upgrade_restart_23(self, call_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
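
Beyond the deleted security suite, the substantive change in `test_hdfs_client.py` is the expected mode of the generated client config files, loosened from 0600 (owner-only) to 0644 (world-readable). Those are Python 2 octal literals; a quick check of what the two modes mean, written with Python 3 syntax:

```python
# Sketch: the meaning of the two octal modes (0644 in the Python 2 test
# file spells the same value as 0o644 here).
import stat

assert stat.filemode(stat.S_IFREG | 0o600) == '-rw-------'  # old: owner-only
assert stat.filemode(stat.S_IFREG | 0o644) == '-rw-r--r--'  # new: group/other readable
```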
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 4b63de4..2202661 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -369,120 +369,6 @@
     except:
       pass
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.journalnode.kerberos.keytab.file': 'path/to/journalnode/keytab/file',
-        'dfs.journalnode.kerberos.principal': 'journalnode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.journalnode.keytab.file',
-                         'dfs.journalnode.kerberos.principal']
-    props_read_check = ['dfs.journalnode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
-                                           security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                         classname = "JournalNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
-                       classname = "JournalNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
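
The deleted suites lean on `reset_mock()` between scenarios. Worth noting: `reset_mock()` clears the recorded call history but, by default, keeps `return_value` and `side_effect`, which is why each scenario above reassigns `return_value` explicitly after the reset. A sketch under standard `unittest.mock`:

```python
# Sketch: reset_mock wipes call history but not the configured
# return_value/side_effect.
from unittest.mock import MagicMock

get_params = MagicMock(return_value={'core-site': {}})
get_params('conf_dir')
get_params.reset_mock()

assert get_params.call_args_list == []            # history gone
assert get_params('again') == {'core-site': {}}   # return_value survives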
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index a6a474a..01149fb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1277,120 +1277,6 @@
 
     self.assertTrue(isfile_mock.called)
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.namenode.keytab.file': 'path/to/namenode/keytab/file',
-        'dfs.namenode.kerberos.principal': 'namenode_principal'
-      }
-    }
-    props_value_check = None
-    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
-                       'dfs.namenode.keytab.file',
-                       'dfs.namenode.kerberos.principal']
-    props_read_check = ['dfs.namenode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.namenode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                         classname = "NameNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
-                       classname = "NameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch.object(time, "sleep")
   @patch("resource_management.libraries.functions.namenode_ha_utils.get_namenode_states")
   def test_upgrade_restart(self, get_namenode_states_mock, sleep_mock):
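
One latent bug the deletions take with them: every suite checked the kinit path via `cached_kinit_executor_mock.called_with(...)`, which is not an assertion method at all. Attribute access on a `Mock` auto-creates a child mock, so the line passes no matter what was actually called. A sketch of the pitfall:

```python
# Sketch: .called_with is an auto-created child mock, not an assertion;
# it silently "passes" with any arguments.
from unittest.mock import MagicMock

executor = MagicMock()
executor('/usr/bin/kinit', 'hdfs')

executor.called_with('totally', 'wrong', 'args')       # no error, checks nothing
executor.assert_called_with('/usr/bin/kinit', 'hdfs')  # the real assertion
```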
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index 396778d..b8fee12 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -267,122 +267,6 @@
         group = 'hadoop',
     )
 
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'nfs.keytab.file': 'path/to/nfsgateway/keytab/file',
-        'nfs.kerberos.principal': 'nfs_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['nfs.keytab.file'],
-                                           security_params['hdfs-site']['nfs.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                         classname = "NFSGateway",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nfsgateway.py",
-                       classname = "NFSGateway",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    self.assertNoMoreResources()
-
   @patch("resource_management.core.shell.call")
   def test_pre_upgrade_restart(self, call_mock):
     call_mock.side_effects = [(0, None), (0, None)]
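
Each of these suites stacks five `@patch` decorators, and the mock arguments arrive bottom-up: the decorator closest to the function supplies the first parameter, which is why `put_structured_out_mock` is listed first even though its `@patch` line is last. A sketch of the ordering rule (patch targets are illustrative):

```python
# Sketch: @patch decorators apply bottom-up, so the lowest one produces
# the first mock argument.
from unittest.mock import patch

@patch("os.path.isdir")   # outermost -> last argument
@patch("os.path.exists")  # innermost -> first argument
def probe(exists_mock, isdir_mock):
    return exists_mock is not isdir_mock

assert probe()
```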
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
index 7b9dcb4..9e9366d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
@@ -274,119 +274,4 @@
                               mode = 0755,
                               create_parents = True,
                               cd_access='a'
-                              )
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      },
-      'hdfs-site': {
-        'dfs.secondary.namenode.keytab.file': 'path/to/snamenode/keytab/file',
-        'dfs.secondary.namenode.kerberos.principal': 'snamenode_principal'
-      }
-    }
-
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hdfs-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                           security_params['hdfs-site']['dfs.secondary.namenode.kerberos.principal'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                         classname = "SNameNode",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contains hdfs-site
-    empty_security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
-                       classname = "SNameNode",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file
+                              )
\ No newline at end of file
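
A final note on the deleted pattern: the exception scenarios wrap `executeScript` in a bare `try/except` followed by `self.assertTrue(True)`, an assertion that can never fail, even when no exception is raised. The standard-library idiom that would actually assert the failure, sketched with an illustrative mock:

```python
# Sketch: assertRaises fails the test if the callable does NOT raise,
# unlike try/except + assertTrue(True).
import unittest
from unittest.mock import MagicMock

class Example(unittest.TestCase):
    def test_executor_failure_is_raised(self):
        executor = MagicMock(side_effect=Exception("Invalid command"))
        with self.assertRaises(Exception):
            executor('/usr/bin/kinit')

if __name__ == '__main__':
    unittest.main()
```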
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
index e952108..127a045 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -381,104 +381,4 @@
         environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
         not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
     )
-    self.assertNoMoreResources()
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-
-    # Test that function works when is called with correct parameters
-    security_params = {
-      'core-site': {
-        'hadoop.security.authentication': 'kerberos'
-      }
-    }
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('core-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_keytab'],
-                                           self.config_dict['configurations']['hadoop-env']['hdfs_user_principal_name'],
-                                           self.config_dict['hostname'],
-                                           '/tmp')
-
-    # Testing that the exception throw by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                           classname = "ZkfcSlave",
-                           command = "security_status",
-                           config_file="secured.json",
-                           stack_version = self.STACK_VERSION,
-                           target = RMFTestCase.TARGET_COMMON_SERVICES
-        )
-    except:
-      self.assertTrue(True)
-
-    # Testing when hadoop.security.authentication is simple
-    security_params['core-site']['hadoop.security.authentication'] = 'simple'
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    security_params['core-site']['hadoop.security.authentication'] = 'kerberos'
-
-    # Testing with not empty result_issues
-    result_issues_with_params = {
-      'hdfs-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with empty hdfs_user_principal and hdfs_user_keytab
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
-                       classname = "ZkfcSlave",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file
+    self.assertNoMoreResources()
\ No newline at end of file
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 6592590..ae2ec86 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -853,118 +853,6 @@
 
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that function works when is called with correct parameters
-
-    security_params = {
-      'hive-site': {
-        "hive.server2.authentication": "KERBEROS",
-        "hive.metastore.sasl.enabled": "true",
-        "hive.security.authorization.enabled": "true",
-        "hive.server2.authentication.kerberos.keytab": "path/to/keytab",
-        "hive.server2.authentication.kerberos.principal": "principal",
-        "hive.server2.authentication.spnego.keytab": "path/to/spnego_keytab",
-        "hive.server2.authentication.spnego.principal": "spnego_principal"
-      }
-    }
-    result_issues = []
-    props_value_check = {"hive.server2.authentication": "KERBEROS",
-                         "hive.metastore.sasl.enabled": "true",
-                         "hive.security.authorization.enabled": "true"}
-    props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                         "hive.server2.authentication.kerberos.principal",
-                         "hive.server2.authentication.spnego.principal",
-                         "hive.server2.authentication.spnego.keytab"]
-
-    props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                        "hive.server2.authentication.spnego.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with('/usr/hdp/current/hive-server2/conf', {'hive-site.xml': "XML"})
-    build_exp_mock.assert_called_with('hive-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['hive_user'],
-                                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                         classname = "HiveServer",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['hive-site']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart(self, copy_to_hdfs_mock):
     copy_to_hdfs_mock.return_value = True
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index 718ad4d..03dd391 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -276,122 +276,6 @@
                               mode = 0644,
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'webhcat-site': {
-        "templeton.kerberos.secret": "secret",
-        "templeton.kerberos.keytab": 'path/to/keytab',
-        "templeton.kerberos.principal": "principal"
-      },
-      "hive-site": {
-        "hive.server2.authentication": "KERBEROS",
-        "hive.metastore.sasl.enabled": "true",
-        "hive.security.authorization.enabled": "true"
-      }
-    }
-    result_issues = []
-    webhcat_props_value_check = {"templeton.kerberos.secret": "secret"}
-    webhcat_props_empty_check = ["templeton.kerberos.keytab",
-                         "templeton.kerberos.principal"]
-    webhcat_props_read_check = ["templeton.kerberos.keytab"]
-
-    hive_props_value_check = {"hive.server2.authentication": "KERBEROS",
-                         "hive.metastore.sasl.enabled": "true",
-                         "hive.security.authorization.enabled": "true"}
-    hive_props_empty_check = None
-    hive_props_read_check = None
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('hive-site', hive_props_value_check, hive_props_empty_check, hive_props_read_check)
-    # get_params_mock.assert_called_with(status_params.hive_conf_dir, {'hive-site.xml': "XML"})
-    get_params_mock.assert_called_with('/usr/hdp/current/hive-webhcat/conf', {'webhcat-site.xml': "XML"})
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['webhcat_user'],
-                                                  security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                                  security_params['webhcat-site']['templeton.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                         classname = "WebHCatServer",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {
-      'hive-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index f5bd4aa..426c36a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -1150,119 +1150,6 @@
     )
     self.assert_configure_default()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-    security_params = {
-      "oozie-site": {
-        "oozie.authentication.type": "kerberos",
-        "oozie.service.AuthorizationService.security.enabled": "true",
-        "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
-        "local.realm": "EXAMPLE.COM",
-        "oozie.authentication.kerberos.principal": "principal",
-        "oozie.authentication.kerberos.keytab": "/path/to_keytab",
-        "oozie.service.HadoopAccessorService.kerberos.principal": "principal",
-        "oozie.service.HadoopAccessorService.keytab.file": "/path/to_keytab"}
-    }
-
-    result_issues = []
-    props_value_check = {"oozie.authentication.type": "kerberos",
-                         "oozie.service.AuthorizationService.security.enabled": "true",
-                         "oozie.service.HadoopAccessorService.kerberos.enabled": "true"}
-    props_empty_check = [ "local.realm",
-                          "oozie.authentication.kerberos.principal",
-                          "oozie.authentication.kerberos.keytab",
-                          "oozie.service.HadoopAccessorService.kerberos.principal",
-                          "oozie.service.HadoopAccessorService.keytab.file"]
-    props_read_check = None
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/etc/oozie/conf", {'oozie-site.xml': 'XML'})
-    build_exp_mock.assert_called_with('oozie-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['oozie-env']['oozie_user'],
-                                                  security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
-                                                  security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                         classname = "OozieServer",
-                         command = "security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain oozie-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {
-      'oozie-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
-                       classname = "OozieServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   @patch("os.path.isdir")
   @patch("os.path.exists")
   @patch("os.path.isfile")
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 7a0514a..53218d5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -197,7 +197,55 @@
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
     self.assertResourceCalled('HdfsResource', '/app-logs',
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = False,
@@ -309,55 +357,6 @@
       cd_access = 'a',
       recursive_ownership = True,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -464,6 +463,56 @@
                               )
 
   def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
+
     self.assertResourceCalled('HdfsResource', '/app-logs',
         immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
         security_enabled = True,
@@ -575,55 +624,6 @@
       cd_access = 'a',
       recursive_ownership = True,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -741,112 +741,6 @@
                               group = 'hadoop',
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      "mapred-site": {
-        'mapreduce.jobhistory.keytab': "/path/to/keytab1",
-        'mapreduce.jobhistory.principal': "principal1",
-        'mapreduce.jobhistory.webapp.spnego-keytab-file': "/path/to/keytab2",
-        'mapreduce.jobhistory.webapp.spnego-principal': "principal2"
-      }
-    }
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/etc/hadoop/conf", {'mapred-site.xml': 'XML'})
-    build_exp_mock.assert_called_with('mapred-site',
-                                      None,
-                                      [
-                                        'mapreduce.jobhistory.keytab',
-                                        'mapreduce.jobhistory.principal',
-                                        'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                        'mapreduce.jobhistory.webapp.spnego-principal'
-                                        ],
-                                      None)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['mapred-env']['mapred_user'],
-                                                  security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                                  security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                         classname="HistoryServer",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain mapred-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal not set."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {'mapred-site': "Something bad happened"}
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historyserver.py",
-                       classname="HistoryServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def assert_call_to_get_hadoop_conf_dir(self):
     # From call to conf_select.get_hadoop_conf_dir()
     self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index ab5e2cd..6fc5bae 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -133,6 +133,56 @@
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      create_parents = True,
+      cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+      owner = 'yarn',
+      group = 'hadoop',
+      create_parents = True,
+      ignore_failures = True,
+      cd_access = 'a',
+    )
+
     self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
         create_parents = True,
         mode = 0755,
@@ -185,55 +235,7 @@
         group = 'hadoop',
         mode = 0644,
     )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      create_parents = True,
-      cd_access = 'a',
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-      owner = 'yarn',
-      group = 'hadoop',
-      create_parents = True,
-      ignore_failures = True,
-      cd_access = 'a',
-    )
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -340,53 +342,6 @@
                               )
 
   def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-                              action = ['delete']
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-                              action = ['delete']
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hadoop-yarn',)
-    self.assertResourceCalled('File', '/var/lib/hadoop-yarn/nm_security_enabled',
-                              content= 'Marker file to track first start after enabling/disabling security. During first start yarn local, log dirs are removed and recreated'
-    )
-    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
-        create_parents = True,
-        mode = 0755,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              ignore_failures = True,
-                              mode = 0775,
-                              cd_access='a',
-                              )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist',
-        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
-        owner = 'hdfs',
-        group = 'hadoop',
-        mode = 0644,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
-        create_parents = True,
-        mode = 0755,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              ignore_failures = True,
-                              mode = 0755,
-                              cd_access='a',
-                              recursive_mode_flags = {'d': 'a+rwx', 'f': 'a+rw'},
-                              )
-    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist',
-        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
-        owner = 'hdfs',
-        group = 'hadoop',
-        mode = 0644,
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -436,6 +391,55 @@
       ignore_failures = True,
       cd_access = 'a',
     )
+
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              action = ['delete']
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              action = ['delete']
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-yarn',)
+    self.assertResourceCalled('File', '/var/lib/hadoop-yarn/nm_security_enabled',
+                              content= 'Marker file to track first start after enabling/disabling security. During first start yarn local, log dirs are removed and recreated'
+    )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
+        create_parents = True,
+        mode = 0755,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/log',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              ignore_failures = True,
+                              mode = 0775,
+                              cd_access='a',
+                              )
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist',
+        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
+        owner = 'hdfs',
+        group = 'hadoop',
+        mode = 0644,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/yarn',
+        create_parents = True,
+        mode = 0755,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/yarn/local',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              ignore_failures = True,
+                              mode = 0755,
+                              cd_access='a',
+                              recursive_mode_flags = {'d': 'a+rwx', 'f': 'a+rw'},
+                              )
+    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist',
+        content = '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n',
+        owner = 'hdfs',
+        group = 'hadoop',
+        mode = 0644,
+    )
+
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -628,115 +632,6 @@
       self.assertTrue(mocks_dict['call'].called)
       self.assertEqual(mocks_dict['call'].call_count,1)
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.nodemanager.keytab': 'path/to/nodemanager/keytab',
-        'yarn.nodemanager.principal': 'nodemanager_principal',
-        'yarn.nodemanager.webapp.spnego-keytab-file': 'path/to/nodemanager/webapp/keytab',
-        'yarn.nodemanager.webapp.spnego-principal': 'nodemanager_webapp_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.nodemanager.principal",
-                         "yarn.nodemanager.keytab",
-                         "yarn.nodemanager.webapp.spnego-principal",
-                         "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-    props_read_check = ["yarn.nodemanager.keytab",
-                        "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                                  security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-          )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nodemanager.py",
-                       classname="Nodemanager",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-  
   @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index b7f90c8..ed5ee2c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -178,19 +178,6 @@
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -240,6 +227,19 @@
       ignore_failures = True,
       cd_access = 'a',
     )
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -346,19 +346,6 @@
                               )
 
   def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-        mode = 0755,
-        create_parents = True,
-        cd_access = 'a',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
-        owner = 'yarn',
-        group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
-      owner = 'yarn',
-      group = 'hadoop',
-    )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
       owner = 'yarn',
       group = 'hadoop',
@@ -408,6 +395,19 @@
       ignore_failures = True,
       cd_access = 'a',
     )
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+        mode = 0755,
+        create_parents = True,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+        owner = 'yarn',
+        group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
+      owner = 'yarn',
+      group = 'hadoop',
+    )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
       owner = 'hdfs',
       group = 'hadoop',
@@ -525,114 +525,6 @@
                               group = 'hadoop',
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.resourcemanager.keytab': '/path/to/resourcemanager/keytab',
-        'yarn.resourcemanager.principal': 'nresourcemanager_principal',
-        'yarn.resourcemanager.webapp.spnego-keytab-file': 'path/to/resourcemanager/webapp/keytab',
-        'yarn.resourcemanager.webapp.spnego-principal': 'resourcemanager_webapp_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.resourcemanager.principal",
-                         "yarn.resourcemanager.keytab",
-                         "yarn.resourcemanager.webapp.spnego-principal",
-                         "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-    props_read_check = ["yarn.resourcemanager.keytab",
-                        "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                                  security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-          self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-          )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with non-empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enable = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
-                       classname="Resourcemanager",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index b9ebea2..6d38a67 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -247,109 +247,6 @@
       group = 'hadoop',
     )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'zookeeper_jaas': {
-        'Server': {
-          'keyTab': 'path/to/zookeeper/service/keytab',
-          'principal': 'zookeeper_keytab'
-        }
-      }
-    }
-    result_issues = []
-    props_value_check = None
-    props_empty_check = ['Server/keyTab', 'Server/principal']
-    props_read_check = ['Server/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('zookeeper_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['zookeeper-env']['zk_user'],
-                                                  security_params['zookeeper_jaas']['Server']['keyTab'],
-                                                  security_params['zookeeper_jaas']['Server']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain zookeeper_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'zookeeper_jaas': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
-                       classname = "ZookeeperServer",
-                       command = "security_status",
-                       config_file = "default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 94425e0..d267bc1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -627,8 +627,8 @@
           "user_group": "hadoop",
           "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0",
           "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
-          "metrics_collector_vip_host": "c6402.ambari.apache.org",
-          "metrics_collector_vip_port": "6189",
+          "metrics_collector_external_hosts": "c6402.ambari.apache.org",
+          "metrics_collector_external_port": "6189",
           "override_uid" : "true",
           "fetch_nonlocal_groups": "true",
           "manage_dirs_on_root": "true",
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
index 1bfa173..19c785c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
@@ -24,6 +24,7 @@
 from resource_management.libraries.functions import conf_select
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
+@patch("os.path.isfile", new = MagicMock(return_value=False))
 class TestHookAfterInstall(RMFTestCase):
 
   def test_hook_default(self):
@@ -40,7 +41,11 @@
                               configurations = self.getConfig()['configurations']['core-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                               only_if="ls /etc/hadoop/conf")
-
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
     self.assertNoMoreResources()
 
 
@@ -81,6 +86,12 @@
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:
@@ -148,6 +159,12 @@
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:
@@ -248,6 +265,12 @@
       configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
       only_if="ls /usr/hdp/current/hadoop-client/conf")
 
+    self.assertResourceCalled('Directory',
+                              '/etc/ambari-logsearch-logfeeder/conf',
+                              mode = 0755,
+                              cd_access = 'a',
+                              create_parents = True)
+
     package_dirs = conf_select.get_package_dirs();
     for package, dir_defs in package_dirs.iteritems():
       for dir_def in dir_defs:
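The hunks above all assert one new resource: after-INSTALL is now expected to create the Logfeeder configuration directory. Read back from the assertions (the hook source itself is not part of this diff), the hook presumably declares something like the following inside its Environment context:

    from resource_management.core.resources.system import Directory

    Directory('/etc/ambari-logsearch-logfeeder/conf',
              mode=0755,            # Python 2 octal literal, as in these tests
              cd_access='a',        # make every ancestor directory traversable
              create_parents=True)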
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 5f88b6b..b9747a2 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -91,30 +91,6 @@
                           )
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock):
-    # Test that the function works when it is called with correct parameters
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_client.py",
-                       classname="FalconClient",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_client.py",
-                       classname="FalconClient",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index feba0c4..44da365 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -414,115 +414,6 @@
     )
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'startup': {
-        '*.falcon.service.authentication.kerberos.keytab': 'path/to/falcon/service/keytab',
-        '*.falcon.service.authentication.kerberos.principal': 'falcon_service_keytab',
-        '*.falcon.http.authentication.kerberos.keytab': 'path/to/falcon/http/keytab',
-        '*.falcon.http.authentication.kerberos.principal': 'falcon_http_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"*.falcon.authentication.type": "kerberos",
-                           "*.falcon.http.authentication.type": "kerberos"}
-    props_empty_check = ["*.falcon.service.authentication.kerberos.principal",
-                           "*.falcon.service.authentication.kerberos.keytab",
-                           "*.falcon.http.authentication.kerberos.principal",
-                           "*.falcon.http.authentication.kerberos.keytab"]
-
-    props_read_check = ["*.falcon.service.authentication.kerberos.keytab",
-                          "*.falcon.http.authentication.kerberos.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with('/etc/falcon/conf', {'startup.properties': 'PROPERTIES'})
-    build_exp_mock.assert_called_with('startup', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['falcon-env']['falcon_user'],
-                                                  security_params['startup']['*.falcon.http.authentication.kerberos.keytab'],
-                                                  security_params['startup']['*.falcon.http.authentication.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain startup
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'startup': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
-                       classname="FalconServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch('os.path.isfile', new=MagicMock(return_value=True))
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/falcon-upgrade.json"
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index a159cd0..e34734c 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -406,119 +406,6 @@
                               mode = 0755,
                               )
 
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'hive-site': {
-        'hive.server2.authentication': "KERBEROS",
-        'hive.metastore.sasl.enabled': "true",
-        'hive.security.authorization.enabled': 'true',
-        'hive.metastore.kerberos.keytab.file': 'path/to/keytab',
-        'hive.metastore.kerberos.principal': 'principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {
-      'hive.server2.authentication': "KERBEROS",
-      'hive.metastore.sasl.enabled': "true",
-      'hive.security.authorization.enabled': 'true'
-    }
-    props_empty_check = [
-      'hive.metastore.kerberos.keytab.file',
-      'hive.metastore.kerberos.principal'
-    ]
-    props_read_check = [
-      'hive.metastore.kerberos.keytab.file'
-    ]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    get_params_mock.assert_called_with("/usr/hdp/current/hive-server2/conf", {'hive-site.xml': "XML"})
-    build_exp_mock.assert_called_with('hive-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['hive-env']['hive_user'],
-                                                  security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                                  security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                         classname = "HiveMetastore",
-                         command = "security_status",
-                         config_file="../../2.1/configs/secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain hive-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'hive-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "security_status",
-                       config_file="../../2.1/configs/default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   def test_pre_upgrade_restart(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -801,6 +688,13 @@
         content = StaticFile('startMetastore.sh'),
         mode = 0755,
     )
+
+    self.assertResourceCalled('Directory', '/tmp/hive',
+                              owner = 'hive',
+                              create_parents = True,
+                              mode=0777
+                              )
+
     self.assertResourceCalled('Execute', ('cp',
      '--remove-destination',
      '/usr/share/java/mysql-connector-java.jar',
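Besides dropping test_security_status, this file gains an assertion that metastore setup creates Hive's scratch directory. Inferred from that assertion (the service script itself is outside this diff), the production side would look roughly like:

    from resource_management.core.resources.system import Directory

    # 0777 is Python 2 octal syntax (0o777 under Python 3): /tmp/hive must be
    # writable by arbitrary job users, not only the 'hive' owner.
    Directory('/tmp/hive',
              owner='hive',
              create_parents=True,
              mode=0777)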
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index 3e10611..f208d3a 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -176,107 +176,3 @@
     self.assertEquals(
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'storm_jaas': {
-        'StormServer': {
-          'keyTab': 'path/to/storm/service/keytab',
-          'principal': 'storm_keytab'
-        }
-      }
-    }
-    result_issues = []
-
-    props_value_check = None
-    props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-    props_read_check = ['StormServer/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_jaas']['StormServer']['keyTab'],
-                                                  security_params['storm_jaas']['StormServer']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                        classname = "DrpcServer",
-                        command = "security_status",
-                        config_file="secured.json",
-                        stack_version = self.STACK_VERSION,
-                        target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain storm_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'storm_jaas': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/drpc_server.py",
-                       classname = "DrpcServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 40972f9..35f057c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -261,106 +261,3 @@
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
     self.assertNoMoreResources()
-    
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'storm_jaas': {
-        'StormServer': {
-          'keyTab': 'path/to/storm/service/keytab',
-          'principal': 'storm_keytab'
-        }
-      }
-    }
-    result_issues = []
-
-    props_value_check = None
-    props_empty_check = ['StormServer/keyTab', 'StormServer/principal']
-    props_read_check = ['StormServer/keyTab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_jaas', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_jaas']['StormServer']['keyTab'],
-                                                  security_params['storm_jaas']['StormServer']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain storm_jaas
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'storm_jaas': "Something bad happened"
-    }
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus.py",
-                       classname = "Nimbus",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-    self.assertNoMoreResources()
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index d6497ed..3ac38c7 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -246,85 +246,3 @@
     self.assertEquals(
       ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'storm', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
        mocks_dict['call'].call_args_list[0][0][0])
-
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-    result_issues = []
-
-    security_params = {
-      'storm_ui': {
-        'storm_ui_principal_name': 'HTTP/_HOST',
-        'storm_ui_keytab': '/etc/security/keytabs/spnego.service.keytab'
-      }
-    }
-    props_value_check = None
-    props_empty_check = ['storm_ui_principal_name', 'storm_ui_keytab']
-    props_read_check = ['storm_ui_keytab']
-
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('storm_ui', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['storm-env']['storm_user'],
-                                                  security_params['storm_ui']['storm_ui_keytab'],
-                                                  security_params['storm_ui']['storm_ui_principal_name'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                        classname = "UiServer",
-                        command = "security_status",
-                        config_file="secured.json",
-                        stack_version = self.STACK_VERSION,
-                        target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {}
-    result_issues_with_params['storm_ui']="Something bad happened"
-
-    validate_security_config_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ui_server.py",
-                       classname = "UiServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 40db813..530d1d9 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -92,26 +92,6 @@
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access='a'
-                              )
-    self.assertResourceCalled('HdfsResource', None,
-                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
-                              security_enabled = False,
-                              hadoop_bin_dir = '/usr/bin',
-                              keytab = UnknownConfigurationMock(),
-                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
-                              dfs_type = '',
-                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
-                              kinit_path_local = '/usr/bin/kinit',
-                              principal_name = UnknownConfigurationMock(),
-                              user = 'hdfs',
-                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
-                              hadoop_conf_dir = '/etc/hadoop/conf',
-                              )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -161,6 +141,26 @@
                               ignore_failures = True,
                               cd_access = 'a',
                               )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access='a'
+                              )
+    self.assertResourceCalled('HdfsResource', None,
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              dfs_type = '',
+                              hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = UnknownConfigurationMock(),
+                              user = 'hdfs',
+                              action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              hadoop_conf_dir = '/etc/hadoop/conf',
+                              )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               owner = 'hdfs',
                               group = 'hadoop',
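The two hunks above move, rather than alter, the timeline Directory and HdfsResource assertions: RMFTestCase-style harnesses consume recorded resources in creation order, so when the production script reorders its resource creation the assertions must move in step (the test_ats_1_5.py hunks further below are the same kind of move). A toy sketch of such an order-sensitive queue, assuming a simple FIFO of recorded calls:

    # Toy illustration only, not RMFTestCase itself.
    class ResourceLog(object):
        def __init__(self):
            self.calls = []

        def record(self, res_type, name, **kwargs):
            self.calls.append((res_type, name, kwargs))

        def assertResourceCalled(self, res_type, name, **kwargs):
            actual = self.calls.pop(0)  # strictly positional: order matters
            assert actual == (res_type, name, kwargs), actual

        def assertNoMoreResources(self):
            assert not self.calls, self.calls

    log = ResourceLog()
    log.record('Directory', '/var/run/hadoop-yarn', owner='yarn')
    log.record('Directory', '/var/log/hadoop-yarn/timeline', owner='yarn')
    log.assertResourceCalled('Directory', '/var/run/hadoop-yarn', owner='yarn')
    log.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline', owner='yarn')
    log.assertNoMoreResources()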
@@ -266,116 +266,6 @@
                               group = 'hadoop',
                               )
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      'yarn-site': {
-        'yarn.timeline-service.keytab': '/path/to/applicationtimeline/keytab',
-        'yarn.timeline-service.principal': 'applicationtimeline_principal',
-        'yarn.timeline-service.http-authentication.kerberos.keytab': 'path/to/timeline/kerberos/keytab',
-        'yarn.timeline-service.http-authentication.kerberos.principal': 'timeline_principal'
-      }
-    }
-    result_issues = []
-    props_value_check = {"yarn.timeline-service.enabled": "true",
-                         "yarn.timeline-service.http-authentication.type": "kerberos",
-                         "yarn.acl.enable": "true"}
-    props_empty_check = ["yarn.timeline-service.principal",
-                         "yarn.timeline-service.keytab",
-                         "yarn.timeline-service.http-authentication.kerberos.principal",
-                         "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-    props_read_check = ["yarn.timeline-service.keytab",
-                        "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    build_exp_mock.assert_called_with('yarn-site', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 2)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['yarn-env']['yarn_user'],
-                                                  security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                                  security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                         classname="ApplicationTimelineServer",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain yarn-site
-    empty_security_params = {}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {
-      'yarn-site': "Something bad happened"
-    }
-
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/application_timeline_server.py",
-                       classname="ApplicationTimelineServer",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index e8bd5d0..bdd34fc 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -272,6 +272,11 @@
       "services": [
         {
           "StackServices": {
+            "service_name": "YARN"
+          }, "components": []
+        },
+        {
+          "StackServices": {
             "service_name": "HIVE",
           },
           "components": [
@@ -338,7 +343,7 @@
     self.assertEqual(configurations["core-site"]["properties"]["hadoop.proxyuser.HTTP.hosts"] == "example.com", True)
 
     newhost_list = ["example.com", "example.org"]
-    services["services"][0]["components"][0]["StackServiceComponents"]["hostnames"] = newhost_list
+    services["services"][1]["components"][0]["StackServiceComponents"]["hostnames"] = newhost_list
     configurations["core-site"]["properties"]["hadoop.proxyuser.HTTP.hosts"] = ""
 
     self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData, services, hosts)
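The hunk above inserts a YARN entry ahead of HIVE in the mocked service list, which is why the later lookup shifts from services["services"][0] to services["services"][1]. Positional indexing like this is brittle; a name-based lookup (a hedged alternative, not part of this patch) would absorb such insertions:

    # Hypothetical helper: resolve a service by name instead of list position.
    def find_service(services, name):
        for svc in services["services"]:
            if svc["StackServices"]["service_name"] == name:
                return svc
        return None

    services = {"services": [
        {"StackServices": {"service_name": "YARN"}, "components": []},
        {"StackServices": {"service_name": "HIVE"}, "components": [{}]},
    ]}
    assert find_service(services, "HIVE") is services["services"][1]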
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index e0118de..32b5d70 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -128,108 +128,6 @@
     )
     self.assertNoMoreResources()
 
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock,
-                           validate_security_config_mock, get_params_mock, build_exp_mock):
-    # Test that the function works when it is called with correct parameters
-
-    security_params = {
-      "krb5JAASLogin":
-        {
-          'keytab': "/path/to/keytab",
-          'principal': "principal"
-        },
-      "gateway-site" : {
-        "gateway.hadoop.kerberos.secured" : "true"
-      }
-    }
-
-    result_issues = []
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-
-    self.assertTrue(build_exp_mock.call_count, 2)
-    build_exp_mock.assert_called_with('gateway-site', {"gateway.hadoop.kerberos.secured": "true"}, None, None)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-    self.assertTrue(cached_kinit_executor_mock.call_count, 1)
-    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
-                                                  self.config_dict['configurations']['knox-env']['knox_user'],
-                                                  security_params['krb5JAASLogin']['keytab'],
-                                                  security_params['krb5JAASLogin']['principal'],
-                                                  self.config_dict['hostname'],
-                                                  '/tmp')
-
-    # Testing that the exception thrown by cached_executor is caught
-    cached_kinit_executor_mock.reset_mock()
-    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
-
-    try:
-      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                         classname = "KnoxGateway",
-                         command="security_status",
-                         config_file="secured.json",
-                         stack_version = self.STACK_VERSION,
-                         target = RMFTestCase.TARGET_COMMON_SERVICES
-      )
-    except:
-      self.assertTrue(True)
-
-    # Testing with a security_params which doesn't contain krb5JAASLogin
-    empty_security_params = {"krb5JAASLogin" : {}}
-    cached_kinit_executor_mock.reset_mock()
-    get_params_mock.reset_mock()
-    put_structured_out_mock.reset_mock()
-    get_params_mock.return_value = empty_security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file and principal are not set."})
-
-    # Testing with a non-empty result_issues
-    result_issues_with_params = {'krb5JAASLogin': "Something bad happened"}
-    validate_security_config_mock.reset_mock()
-    get_params_mock.reset_mock()
-    validate_security_config_mock.return_value = result_issues_with_params
-    get_params_mock.return_value = security_params
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="secured.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
-    # Testing with security_enabled = false
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
-                       classname = "KnoxGateway",
-                       command="security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
-
   @patch("os.path.isdir")
   def test_pre_upgrade_restart(self, isdir_mock):
     isdir_mock.return_value = True
diff --git a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
index b523412..b46a0ed 100644
--- a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
+++ b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
@@ -50,6 +50,55 @@
     self.assertNoMoreResources()
 
   def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
+                              owner = 'yarn',
+                              group = 'hadoop',
+                              ignore_failures = True,
+                              create_parents = True,
+                              cd_access = 'a',
+                              )
     self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/timeline',
                               owner = 'yarn',
                               group = 'hadoop',
@@ -144,55 +193,6 @@
                               action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                               hadoop_conf_dir = '/etc/hadoop/conf',
                               )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
-                              owner = 'yarn',
-                              group = 'hadoop',
-                              ignore_failures = True,
-                              create_parents = True,
-                              cd_access = 'a',
-                              )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
                               group = 'hadoop',
                               conf_dir = '/etc/hadoop/conf',
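
The two hunks above are a pure reordering: the Directory assertions move ahead of the HdfsResource block because RMFTestCase's assertResourceCalled consumes the recorded resources in the order the script under test created them. A minimal sketch of that order-sensitive consumption, assuming resources are recorded in a FIFO list (an illustration of the idea, not the actual RMFTestCase internals):

  # Order-sensitive assertion helper over a FIFO of recorded resources.
  class OrderedResourceAssertions(object):
    def __init__(self, recorded):
      # recorded: list of (resource_type, name, kwargs) tuples,
      # appended in the order the script created them
      self.recorded = list(recorded)
      self.index = 0

    def assertResourceCalled(self, resource_type, name, **kwargs):
      assert self.index < len(self.recorded), "no more resources were created"
      actual = self.recorded[self.index]
      assert actual == (resource_type, name, kwargs), \
        "resource #%d mismatch: %r" % (self.index, actual)
      self.index += 1  # consume; the next assertion must match the next resource

    def assertNoMoreResources(self):
      assert self.index == len(self.recorded)

So when the hadoop-yarn/hadoop-mapreduce directory creation moves earlier in the YARN scripts, the assertions must move with it or the test fails on the first mismatch.
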
diff --git a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
index 2de3fba..87304cd 100644
--- a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
+++ b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
@@ -21,7 +21,10 @@
 from stacks.utils.RMFTestCase import RMFTestCase, Template, InlineTemplate, StaticFile
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.libraries.script.config_dictionary import UnknownConfiguration
+from mock.mock import MagicMock, call, patch
 
+@patch("os.listdir", new = MagicMock(return_value=['solr-8886.pid']))
+@patch("os.path.isdir", new = MagicMock(return_value=True))
 class TestInfraSolr(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "AMBARI_INFRA/0.1.0/package"
   STACK_VERSION = "2.4"
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
index 0a7f074..159cb81 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logfeeder.py
@@ -67,15 +67,19 @@
                               action = ['delete']
                               )
     self.assertResourceCalled('PropertiesFile', '/etc/ambari-logsearch-logfeeder/conf/logfeeder.properties',
-                              properties={'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-logfeeder/conf/logfeeder.jceks',
+                              properties={'cluster.name': 'c1',
+                                          'common-property': 'common-value',
+                                          'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-logfeeder/conf/logfeeder.jceks',
                                           'logfeeder.checkpoint.folder': '/etc/ambari-logsearch-logfeeder/conf/checkpoints',
-                                          'logfeeder.config.files': 'output.config.json,input.config-ambari.json,global.config.json,input.config-logsearch.json,input.config-zookeeper.json',
+                                          'logfeeder.config.dir': '/etc/ambari-logsearch-logfeeder/conf',
+                                          'logfeeder.config.files': 'output.config.json,global.config.json',
                                           'logfeeder.metrics.collector.hosts': '',
                                           'logfeeder.metrics.collector.path': '/ws/v1/timeline/metrics',
                                           'logfeeder.metrics.collector.port': '',
                                           'logfeeder.metrics.collector.protocol': '',
                                           'logfeeder.solr.core.config.name': 'history',
-                                          'logfeeder.solr.zk_connect_string': 'c6401.ambari.apache.org:2181/infra-solr'
+                                          'logfeeder.solr.zk_connect_string': 'c6401.ambari.apache.org:2181/infra-solr',
+                                          'logsearch.config.zk_connect_string': 'c6401.ambari.apache.org:2181'
                                          }
                               )
     self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/logfeeder-env.sh',
@@ -89,6 +93,9 @@
                               content=InlineTemplate('GP'),
                               encoding='utf-8'
                               )
+    self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/global.config.json',
+                              content=Template('global.config.json.j2')
+                              )
     self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/input.config-ambari.json',
                               content=InlineTemplate('ambari-grok-filter'),
                               encoding='utf-8'
@@ -98,22 +105,6 @@
                               encoding='utf-8'
                               )
 
-    logfeeder_supported_services = ['logsearch']
-
-    logfeeder_config_file_names = ['global.config.json'] + \
-                                  ['input.config-%s.json' % (tag) for tag in logfeeder_supported_services]
-
-    for file_name in logfeeder_config_file_names:
-      self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/' + file_name,
-                                content=Template(file_name + ".j2")
-                                )
-    self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/input.config-logfeeder-custom.json',
-                              action=['delete']
-                              )
-    self.assertResourceCalled('File', '/etc/ambari-logsearch-logfeeder/conf/input.config-zookeeper.json',
-                              content=InlineTemplate("pattern content")
-                              )
-
   def test_configure_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/logfeeder.py",
                        classname="LogFeeder",
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
index b283a9f..afdc42f 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
@@ -83,7 +83,8 @@
                               action = ['delete']
     )
     self.assertResourceCalled('PropertiesFile', '/etc/ambari-logsearch-portal/conf/logsearch.properties',
-                              properties = {'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-portal/conf/logsearch.jceks',
+                              properties = {'common-property': 'common-value',
+                                            'hadoop.security.credential.provider.path': 'jceks://file/etc/ambari-logsearch-portal/conf/logsearch.jceks',
                                             'logsearch.audit.logs.split.interval.mins': '1',
                                             'logsearch.auth.external_auth.enabled': 'false',
                                             'logsearch.auth.external_auth.host_url': 'http://c6401.ambari.apache.org:8080',
@@ -96,6 +97,7 @@
                                             'logsearch.collection.history.replication.factor': '1',
                                             'logsearch.collection.service.logs.numshards': '10',
                                             'logsearch.collection.service.logs.replication.factor': '1',
+                                            'logsearch.config.zk_connect_string': 'c6401.ambari.apache.org:2181',
                                             'logsearch.login.credentials.file': 'logsearch-admin.json',
                                             'logsearch.protocol': 'http',
                                             'logsearch.roles.allowed': 'AMBARI.ADMINISTRATOR,CLUSTER.ADMINISTRATOR',
diff --git a/ambari-server/src/test/python/stacks/2.4/configs/default.json b/ambari-server/src/test/python/stacks/2.4/configs/default.json
index 8822e96..1863c02 100644
--- a/ambari-server/src/test/python/stacks/2.4/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.4/configs/default.json
@@ -292,6 +292,9 @@
       "logsearch-log4j": {
         "content": "&lt;?xml version=\"1.0\" encoding=\"UTF-8\" ?&gt;\n&lt;!--\n  Licensed to the Apache Software Foundation (ASF) under one or more\n  contributor license agreements.  See the NOTICE file distributed with\n  this work for additional information regarding copyright ownership.\n  The ASF licenses this file to You under the Apache License, Version 2.0\n  (the \"License\"); you may not use this file except in compliance with\n  the License.  You may obtain a copy of the License at\n\n      http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n--&gt;\n&lt;!DOCTYPE log4j:configuration SYSTEM \"log4j.dtd\"&gt;\n&lt;log4j:configuration xmlns:log4j=\"http://jakarta.apache.org/log4j/\"&gt;\n  &lt;appender name=\"console\" class=\"org.apache.log4j.ConsoleAppender\"&gt;\n    &lt;param name=\"Target\" value=\"System.out\" /&gt;\n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt;\n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" /&gt;\n    &lt;/layout&gt;\n  &lt;/appender&gt;\n\n  &lt;appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\"&gt; \n    &lt;param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.log\" /&gt; \n    &lt;param name=\"append\" value=\"true\" /&gt; \n    &lt;param name=\"maxFileSize\" value=\"10MB\" /&gt; \n    &lt;param name=\"maxBackupIndex\" value=\"10\" /&gt; \n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt; \n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\"/&gt; \n    &lt;/layout&gt; \n  &lt;/appender&gt; \n\n  &lt;appender name=\"performance_analyzer\" class=\"org.apache.log4j.RollingFileAppender\"&gt;\n    &lt;param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-performance.log\" /&gt;\n    &lt;param name=\"Threshold\" value=\"info\" /&gt;\n    &lt;param name=\"append\" value=\"true\" /&gt;\n    &lt;param name=\"maxFileSize\" value=\"10MB\" /&gt; \n    &lt;param name=\"maxBackupIndex\" value=\"10\" /&gt; \n    &lt;layout class=\"org.apache.log4j.PatternLayout\"&gt;\n      &lt;param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" /&gt;\n    &lt;/layout&gt;\n  &lt;/appender&gt;\n  \n  &lt;logger name=\"org.apache.ambari.logsearch.perfomance\" additivity=\"false\"&gt;\n   &lt;appender-ref ref=\"performance_analyzer\" /&gt;\n  &lt;/logger&gt;\n\n  &lt;category name=\"org.apache.ambari.logsearch\" additivity=\"false\"&gt;\n    &lt;priority value=\"info\" /&gt;\n    &lt;appender-ref ref=\"rolling_file\" /&gt;\n  &lt;/category&gt;\n\n  &lt;root&gt;\n    &lt;priority value=\"warn\" /&gt;\n    &lt;appender-ref ref=\"rolling_file\" /&gt;\n  &lt;/root&gt;\n&lt;/log4j:configuration&gt;"
       },
+      "logsearch-common-properties": {
+        "common-property": "common-value"
+      },
       "logsearch-properties": {
         "logsearch.spnego.kerberos.host" : "localhost",
         "logsearch.solr.collection.service.logs" : "hadoop_logs",
diff --git a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
index cd2fac8..539bef5 100644
--- a/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
+++ b/ambari-server/src/test/python/stacks/2.5/ATLAS/test_atlas_server.py
@@ -184,45 +184,3 @@
     self.assertResourceCalled('File', '/usr/hdp/current/atlas-server/conf/hdfs-site.xml',action = ['delete'],)
 
     self.assertNoMoreResources()
-
-  @patch("resource_management.libraries.functions.security_commons.build_expectations")
-  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
-  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
-  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
-  @patch("resource_management.libraries.script.Script.put_structured_out")
-  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
-
-    security_params = {
-      'atlas-application': {
-        'atlas.authentication.keytab': '/etc/security/keytabs/atlas.service.keytab',
-        'atlas.authentication.method.file': 'true',
-        'atlas.authentication.method.kerberos': 'true',
-        'atlas.authentication.method.kerberos.keytab': '/etc/security/keytabs/spnego.service.keytab',
-        'atlas.authentication.method.kerberos.principal': 'HTTP/_HOST@EXAMPLE.COM',
-        'atlas.authentication.principal': 'atlas/_HOST@EXAMPLE.COM'
-      }
-    }
-    result_issues = []
-    props_value_check = {'atlas.authentication.method.kerberos': 'true',
-                         'atlas.solr.kerberos.enable': 'true'}
-    props_empty_check = ['atlas.authentication.principal',
-                         'atlas.authentication.keytab',
-                         'atlas.authentication.method.kerberos.principal',
-                         'atlas.authentication.method.kerberos.keytab']
-    props_read_check = ['atlas.authentication.keytab',
-                        'atlas.authentication.method.kerberos.keytab']
-
-    get_params_mock.return_value = security_params
-    validate_security_config_mock.return_value = result_issues
-
-    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
-                       classname = "MetadataServer",
-                       command = "security_status",
-                       config_file="default.json",
-                       stack_version = self.STACK_VERSION,
-                       target = RMFTestCase.TARGET_COMMON_SERVICES
-    )
-    build_exp_mock.assert_called_with('atlas-application', props_value_check, props_empty_check, props_read_check)
-    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
-
-    self.assertNoMoreResources()
diff --git a/ambari-server/src/test/python/stacks/2.5/RANGER_KMS/test_kms_server.py b/ambari-server/src/test/python/stacks/2.5/RANGER_KMS/test_kms_server.py
index 7082a33..f6f0af3 100644
--- a/ambari-server/src/test/python/stacks/2.5/RANGER_KMS/test_kms_server.py
+++ b/ambari-server/src/test/python/stacks/2.5/RANGER_KMS/test_kms_server.py
@@ -43,13 +43,22 @@
     self.assertTrue(isfile_mock.called)
     self.assertNoMoreResources()
 
+  current_date = datetime.now()
+
+  class DTMOCK(object):
+    """
+    Mock datetime to avoid test failures when the test runs a little slower than usual.
+    """
+    def now(self):
+      return TestRangerKMS.current_date
+
   @patch("resource_management.libraries.functions.ranger_functions.Rangeradmin.check_ranger_login_urllib2", new=MagicMock(return_value=200))
   @patch("resource_management.libraries.functions.ranger_functions.Rangeradmin.create_ambari_admin_user", new=MagicMock(return_value=200))
   @patch("kms.get_repo")
   @patch("kms.create_repo")
   @patch("os.path.isfile")
+  @patch("kms.datetime", new=DTMOCK())
   def test_start_default(self, get_repo_mock, create_repo_mock, isfile_mock):
-
     get_repo_mock.return_value = True
     create_repo_mock.return_value = True
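
DTMOCK freezes the clock: current_date is captured once at class-definition time, and patching kms.datetime makes every datetime.now() inside kms.py return that captured value, so the strftime-formatted timestamp asserted below cannot drift on a slow run. The same module-attribute patching idea in standalone form (kms_example is a stand-in module, not the real kms.py):

  import types
  from datetime import datetime
  from mock import patch

  # Stand-in for kms.py: module code that timestamps via its own 'datetime'.
  kms_example = types.ModuleType("kms_example")
  kms_example.datetime = datetime
  def make_timestamp():
    return kms_example.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
  kms_example.make_timestamp = make_timestamp

  frozen = datetime.now()  # captured once, like TestRangerKMS.current_date

  class FrozenDatetime(object):
    def now(self):
      return frozen

  with patch.object(kms_example, "datetime", new=FrozenDatetime()):
    # However slowly this block executes, the timestamp cannot drift.
    assert kms_example.make_timestamp() == frozen.strftime("%Y-%m-%d %H:%M:%S")
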
 
@@ -64,7 +73,7 @@
 
     # TODO confirm repo call
 
-    current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    current_datetime = self.current_date.strftime("%Y-%m-%d %H:%M:%S")
 
     self.assertResourceCalled('File', '/usr/hdp/current/ranger-kms/conf/ranger-security.xml',
       owner = 'kms',
@@ -151,6 +160,55 @@
       mode = 0640
     )
 
+    self.assertResourceCalled('HdfsResource', '/ranger/audit',
+                        type = 'directory',
+                        action = ['create_on_execute'],
+                        owner = 'hdfs',
+                        group = 'hdfs',
+                        mode = 0755,
+                        recursive_chmod = True,
+                        user = 'hdfs',
+                        security_enabled = False,
+                        keytab = None,
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = None,
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
+    self.assertResourceCalled('HdfsResource', '/ranger/audit/kms',
+                        type = 'directory',
+                        action = ['create_on_execute'],
+                        owner = 'kms',
+                        group = 'kms',
+                        mode = 0750,
+                        recursive_chmod = True,
+                        user = 'hdfs',
+                        security_enabled = False,
+                        keytab = None,
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = None,
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
+    self.assertResourceCalled('HdfsResource', None,
+                        action = ['execute'],
+                        user = 'hdfs',
+                        security_enabled = False,
+                        keytab = None,
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = None,
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
     self.assertResourceCalled('File', '/usr/hdp/current/ranger-kms/conf/hdfs-site.xml',
       action = ['delete'],
     )
@@ -537,6 +595,55 @@
       mode = 0640
     )
 
+    self.assertResourceCalled('HdfsResource', '/ranger/audit',
+                        type = 'directory',
+                        action = ['create_on_execute'],
+                        owner = 'hdfs',
+                        group = 'hdfs',
+                        mode = 0755,
+                        recursive_chmod = True,
+                        user = 'hdfs',
+                        security_enabled = True,
+                        keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = 'hdfs-cl1@EXAMPLE.COM',
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
+    self.assertResourceCalled('HdfsResource', '/ranger/audit/kms',
+                        type = 'directory',
+                        action = ['create_on_execute'],
+                        owner = 'kms',
+                        group = 'kms',
+                        mode = 0750,
+                        recursive_chmod = True,
+                        user = 'hdfs',
+                        security_enabled = True,
+                        keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = 'hdfs-cl1@EXAMPLE.COM',
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
+    self.assertResourceCalled('HdfsResource', None,
+                        action = ['execute'],
+                        user = 'hdfs',
+                        security_enabled = True,
+                        keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                        kinit_path_local = '/usr/bin/kinit',
+                        hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                        hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                        principal_name = 'hdfs-cl1@EXAMPLE.COM',
+                        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
+                        default_fs = 'hdfs://c6401.ambari.apache.org:8020'
+    )
+
     self.assertResourceCalled('File', '/usr/hdp/current/ranger-kms/conf/hdfs-site.xml',
       action = ['delete'],
     )
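
Both the default and the secured start paths now assert the same three-step HdfsResource sequence: two deferred directory creations followed by a flush. In Ambari's resource_management, create_on_execute queues the request, and the trailing HdfsResource(None, action=['execute']) performs the queued operations in one batch. A rough sketch of that accumulate-then-flush contract (simplified, not the real implementation):

  # Accumulate-then-flush sketch of HdfsResource semantics.
  class HdfsResourceSketch(object):
    pending = []  # queue of deferred filesystem requests

    def __init__(self, path, action, **kwargs):
      if action == ['create_on_execute']:
        HdfsResourceSketch.pending.append((path, kwargs))   # defer
      elif action == ['execute']:
        for queued_path, queued_kwargs in HdfsResourceSketch.pending:
          print("creating %s with %s" % (queued_path, queued_kwargs))
        HdfsResourceSketch.pending = []                     # one flush per batch

  HdfsResourceSketch('/ranger/audit', action=['create_on_execute'], owner='hdfs', mode=0o755)
  HdfsResourceSketch('/ranger/audit/kms', action=['create_on_execute'], owner='kms', mode=0o750)
  HdfsResourceSketch(None, action=['execute'])  # creates both directories
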
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 4250681..e62e00c 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -140,6 +140,149 @@
       ]
     }
 
+
+    # setup for 'test_recommendYARNConfigurations'
+    self.hosts_9_total = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6403.ambari.apache.org",
+            "host_name": "c6403.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6404.ambari.apache.org",
+            "host_name": "c6404.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6405.ambari.apache.org",
+            "host_name": "c6405.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6406.ambari.apache.org",
+            "host_name": "c6406.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6407.ambari.apache.org",
+            "host_name": "c6407.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6408.ambari.apache.org",
+            "host_name": "c6408.ambari.apache.org"
+          },
+        }, {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6409.ambari.apache.org",
+            "host_name": "c6409.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
     # Expected config outputs.
 
     # Expected capacity-scheduler with 'llap' (size:20) and 'default' queue at root level.
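
The nine entries in hosts_9_total differ only in their host names; everything else (cpu_count 6, total_mem 50331648, the six mountpoints) repeats verbatim. If the fixture ever needs to change, a generator keeps it in one place; a possible compaction, purely illustrative since the test keeps the literal form:

  # Possible generator for the nine identical host entries above.
  def make_host(index):
    name = "c640%d.ambari.apache.org" % index
    return {
      "Hosts": {
        "cpu_count": 6,
        "total_mem": 50331648,
        "disk_info": [{"mountpoint": m} for m in ["/", "/dev/shm", "/vagrant"] * 2],
        "public_host_name": name,
        "host_name": name
      }
    }

  hosts_9_total = {"items": [make_host(i) for i in range(1, 10)]}
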
@@ -926,6 +1069,8 @@
         ]
       }
       ],
+      "changed-configurations": [
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -3671,6 +3816,271 @@
 
 
 
+  ####################### 'Nine Node Managers' cluster - tests for calculating llap configs ################
+
+
+
+  # Test 16 (1). 'default' and 'llap' (State : RUNNING) queues exist at root level in capacity-scheduler, and
+  #          'capacity-scheduler' configs are passed in as a dictionary, and
+  #          services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"] is set to the value "null", and
+  #          (2). 'enable_hive_interactive' is 'on', and (3). a configuration change is detected for 'hive.server2.tez.sessions.per.default.queue'.
+  #          Expected : Configuration values recommended for llap-related configs.
+  def test_recommendYARNConfigurations_nine_node_manager_llap_configs_updated_1(self):
+    # 9 node managers and yarn.nodemanager.resource.memory-mb": "204800"
+    services = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org", "c6403.ambari.apache.org",
+                            "c6404.ambari.apache.org", "c6405.ambari.apache.org", "c6406.ambari.apache.org",
+                            "c6407.ambari.apache.org", "c6408.ambari.apache.org", "c6409.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6401.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'3',
+          u'type': u'hive-interactive-site',
+          u'name': u'hive.server2.tez.sessions.per.default.queue'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler" : {
+          "properties" : {
+            "capacity-scheduler" : "null",
+            "yarn.scheduler.capacity.root.accessible-node-labels" : "*",
+            "yarn.scheduler.capacity.maximum-am-resource-percent" : "1",
+            "yarn.scheduler.capacity.root.acl_administer_queue" : "*",
+            'yarn.scheduler.capacity.queue-mappings-override.enable' : 'false',
+            "yarn.scheduler.capacity.root.default.capacity" : "100",
+            "yarn.scheduler.capacity.root.default.user-limit-factor" : "1",
+            "yarn.scheduler.capacity.root.queues" : "default",
+            "yarn.scheduler.capacity.root.capacity" : "100",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications" : "*",
+            "yarn.scheduler.capacity.root.default.maximum-capacity" : "100",
+            "yarn.scheduler.capacity.node-locality-delay" : "40",
+            "yarn.scheduler.capacity.maximum-applications" : "10000",
+            "yarn.scheduler.capacity.root.default.state" : "RUNNING"
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'50'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'default',
+              'hive.server2.tez.sessions.per.default.queue': '4',
+              'hive.tez.container.size':'4096'
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "1024",
+            "yarn.nodemanager.resource.memory-mb": "212992",
+            "yarn.nodemanager.resource.cpu-vcores": '25'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "4096"
+          }
+        },
+        "hive-site":
+          {
+            'properties': {
+              'hive.tez.container.size': '1024'
+            }
+          },
+      }
+    }
+
+    clusterData = {
+      "cpu": 4,
+      "mapMemory": 30000,
+      "amMemory": 20000,
+      "reduceMemory": 20560,
+      "containers": 3,
+      "ramPerContainer": 82240,
+      "referenceNodeManagerHost" : {
+        "total_mem" : 328960 * 1024
+      },
+      "yarnMinContainerSize": 1024
+    }
+
+    configurations = {
+    }
+
+    # Tests driven by changes to the concurrency config (hive.server2.tez.sessions.per.default.queue)
+
+    ###################################################################
+    #  Test A: 'hive.server2.tez.sessions.per.default.queue' set to 4
+    ###################################################################
+
+    # Test
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts_9_total)
+    self.assertTrue('capacity-scheduler' not in configurations)
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '22'})
+
+    self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
+    self.assertTrue('num_llap_nodes_for_llap_daemons' not in configurations['hive-interactive-env']['properties'])
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '208896')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '25')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '25')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '106496')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '96256')
+    self.assertEqual(configurations['hive-interactive-env']['properties']['hive_heapsize'], '2048')
+    self.assertEqual(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], {'maximum': '9', 'minimum': '1', 'read_only': 'true'})
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '1024')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.auto.convert.join.noconditionaltask.size'], '1145044992')
+
+    self.assertTrue('tez.am.resource.memory.mb' not in configurations['tez-interactive-site']['properties'])
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.queue.name'], {'entries': [{'value': 'default', 'label': 'default'}]})
+
+
+    ##################################################################
+    # Test B: 'hive.server2.tez.sessions.per.default.queue' set to 9
+    ##################################################################
+    # Set the config
+    services['configurations']['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'] = 9
+
+    # Test
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts_9_total)
+    self.assertTrue('capacity-scheduler' not in configurations)
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '22'})
+
+    self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
+    self.assertTrue('num_llap_nodes_for_llap_daemons' not in configurations['hive-interactive-env']['properties'])
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '207872')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '25')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '25')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '105472')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '96256')
+    self.assertEqual(configurations['hive-interactive-env']['properties']['hive_heapsize'], '3600')
+    self.assertEqual(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], {'maximum': '9', 'minimum': '1', 'read_only': 'true'})
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '1024')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.auto.convert.join.noconditionaltask.size'], '1145044992')
+
+    self.assertTrue('tez.am.resource.memory.mb' not in configurations['tez-interactive-site']['properties'])
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.queue.name'], {'entries': [{'value': 'default', 'label': 'default'}]})
+
+
+    ###################################################################
+    # Test C: 'hive.server2.tez.sessions.per.default.queue' set to 10
+    ###################################################################
+    # Set the config
+    services['configurations']['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'] = 10
+
+    # Test
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts_9_total)
+    self.assertTrue('capacity-scheduler' not in configurations)
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '22'})
+
+    self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
+    self.assertTrue('num_llap_nodes_for_llap_daemons' not in configurations['hive-interactive-env']['properties'])
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '204800')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '25')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '25')
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '102400')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '96256')
+    self.assertEqual(configurations['hive-interactive-env']['properties']['hive_heapsize'], '4000')
+    self.assertEqual(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], {'maximum': '9', 'minimum': '1', 'read_only': 'true'})
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '1024')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.auto.convert.join.noconditionaltask.size'], '1145044992')
+
+    self.assertTrue('tez.am.resource.memory.mb' not in configurations['tez-interactive-site']['properties'])
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.queue.name'], {'entries': [{'value': 'default', 'label': 'default'}]})
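
Tests A-C exercise a single knob: the same services dict is mutated between calls, the recommender is re-run against the shared configurations dict, and only the memory-derived values shift (hive.llap.daemon.yarn.container.mb drops 208896 -> 207872 -> 204800 as concurrency grows). The same pattern as a loop, a hypothetical compaction of what the test spells out longhand:

  # Hypothetical compaction of Tests A-C; the test keeps the three blocks.
  for sessions, expected_mb in ((4, '208896'), (9, '207872'), (10, '204800')):
    services['configurations']['hive-interactive-site']['properties'] \
            ['hive.server2.tez.sessions.per.default.queue'] = sessions
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts_9_total)
    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'],
                     expected_mb)
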
+
+
+
+
 
   # Test 16: (1). only 'default' queue exists at root level in capacity-scheduler, and
   #          'capacity-scheduler' configs are passed-in as single "/n" separated string  and
@@ -4918,6 +5328,9 @@
                                   "capacity-scheduler":{"properties":{
                                     "capacity-scheduler": "yarn.scheduler.capacity.root.queues=ndfqueue,leaf\n" +
                                                           "yarn.scheduler.capacity.root.ndfqueue.queues=ndfqueue1,ndfqueue2\n"}}}
+    services["changed-configurations"]= []
+
+
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
@@ -4948,6 +5361,8 @@
           "stack_versions": ["2.4", "2.3", "2.2", "2.1", "2.0.6"]
         }
       },
+      "changed-configurations": [
+      ],
       "configurations": configurations,
       "services": [],
       "ambari-server-properties": {}}
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
index 7054e8f..4e7d857 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/ranger-kms-secured.json
@@ -751,7 +751,7 @@
             "hdfs_log_dir_prefix": "/var/log/hadoop", 
             "hdfs_user_nofile_limit": "128000", 
             "hdfs_user": "hdfs", 
-            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
+            "hdfs_principal_name": "hdfs-cl1@EXAMPLE.COM",
             "keyserver_host": " ", 
             "namenode_opt_maxnewsize": "128m", 
             "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 5bfa1a9..d4d28c9 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -989,11 +989,34 @@
       "yarnMinContainerSize": 256
     }
 
-    hosts = {}
+    hosts = {
+      "items": [
+      {
+        "Hosts": {
+          "cpu_count": 6,
+          "total_mem": 50331648,
+          "disk_info": [
+            {"mountpoint": "/"},
+            {"mountpoint": "/dev/shm"},
+            {"mountpoint": "/vagrant"},
+            {"mountpoint": "/"},
+            {"mountpoint": "/dev/shm"},
+            {"mountpoint": "/vagrant"}
+          ],
+          "public_host_name": "c6401.ambari.apache.org",
+          "host_name": "c6401.ambari.apache.org"
+        },
+      }
+      ]
+    }
 
     services = {
       "services":
-        [
+        [{
+           "StackServices": {
+             "service_name": "YARN"
+           }, "components": []
+         },
          {
             "StackServices": {
               "service_name" : "HIVE",
@@ -1007,6 +1030,8 @@
         "stack_name" : "HDP",
         "stack_version": "2.6"
       },
+      "changed-configurations": [
+      ],
       "configurations": configurations,
       "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
@@ -1016,6 +1041,7 @@
       'yarn-env': {
         'properties': {
           'min_user_id': '500',
+          'apptimelineserver_heapsize': '8072',
           'service_check.queue.name': 'default'
         }
       },
@@ -1131,11 +1157,17 @@
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
           'yarn.nodemanager.resource.memory-mb': '768',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '10',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '768',
-          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
         },
         'property_attributes': {
           'yarn.authorization-provider': {
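
The newly expected yarn.nodemanager.local-dirs and log-dirs values are derived from the host fixture: each distinct mountpoint in disk_info contributes one directory rooted at that mountpoint. A sketch of that derivation, assuming duplicates are collapsed and order is preserved (the real rule lives in the stack advisor):

  # Sketch: derive yarn.nodemanager.local-dirs from the host's mountpoints,
  # assuming duplicates in disk_info are collapsed and order is preserved.
  def recommend_yarn_dirs(disk_info, suffix="hadoop/yarn/local"):
    seen, dirs = set(), []
    for disk in disk_info:
      mount = disk["mountpoint"].rstrip("/")
      if mount in seen:
        continue
      seen.add(mount)
      dirs.append("%s/%s" % (mount, suffix))
    return ",".join(dirs)

  disk_info = [{"mountpoint": m} for m in ["/", "/dev/shm", "/vagrant"] * 2]
  print(recommend_yarn_dirs(disk_info))
  # -> /hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local
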
@@ -1267,6 +1299,8 @@
         "components": []
       }
       ],
+      "changed-configurations": [
+      ],
       "configurations": configurations
     }
 
@@ -1282,7 +1316,8 @@
         'properties': {
           'yarn_user': 'custom_yarn',
           'service_check.queue.name': 'default',
-          'min_user_id': '500'
+          'min_user_id': '500',
+          'apptimelineserver_heapsize': '2048'
         }
       },
       'ranger-yarn-plugin-properties': {
@@ -1302,21 +1337,433 @@
           'yarn.nodemanager.resource.memory-mb': '1280',
           'yarn.scheduler.minimum-allocation-mb': '256',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
           'yarn.nodemanager.resource.cpu-vcores': '4',
           'yarn.scheduler.maximum-allocation-mb': '1280',
-          'yarn.nodemanager.linux-container-executor.group': 'hadoop'
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '7',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
+
         }
       }
     }
 
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
+
     configurations['yarn-env']['properties']['yarn_user'] = 'yarn'
     expected['yarn-env']['properties']['yarn_user'] = 'yarn'
     expected['ranger-yarn-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'yarn'
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
 
+
+
+  def test_recommendYARNConfigurations_for_ats_heapsize_and_cache(self):
+    configurations = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+    services = {
+      "services" : [{
+        "StackServices": {
+          "service_name" : "YARN",
+          "service_version" : "2.7.3.2.6"
+        },
+        "components": []
+      }
+      ],
+      "changed-configurations": [
+      ],
+      "configurations": configurations
+    }
+
+
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      'yarn-env': {
+        'properties': {
+          'yarn_user': 'custom_yarn',
+          'service_check.queue.name': 'default',
+          'min_user_id': '500',
+          'apptimelineserver_heapsize': '1024'
+        }
+      },
+      'ranger-yarn-plugin-properties': {
+        'properties': {
+          'ranger-yarn-plugin-enabled': 'Yes',
+          'REPOSITORY_CONFIG_USERNAME': 'custom_yarn'
+        }
+      },
+      'yarn-site': {
+        'properties': {
+          'hadoop.registry.rm.enabled': 'false',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
+          'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
+          'yarn.acl.enable': 'true',
+          'yarn.scheduler.minimum-allocation-vcores': '1',
+          'yarn.scheduler.maximum-allocation-vcores': '4',
+          'yarn.nodemanager.resource.memory-mb': '1280',
+          'yarn.scheduler.minimum-allocation-mb': '256',
+          'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
+          'yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round': '1.0',
+          'yarn.nodemanager.resource.cpu-vcores': '4',
+          'yarn.scheduler.maximum-allocation-mb': '1280',
+          'yarn.nodemanager.linux-container-executor.group': 'hadoop',
+          'yarn.nodemanager.local-dirs': '/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local',
+          'yarn.nodemanager.log-dirs': '/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log',
+          'yarn.timeline-service.entity-group-fs-store.app-cache-size': '3',
+          'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
+          'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
+
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 2048,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+
+
+    '''
+    Test 1 :
+    I/P:
+       - 'changed-configurations' is empty (doesn't have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 2048
+    O/P :
+       -  Config value recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 3
+           - apptimelineserver_heapsize = 1024
+    '''
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 2:
+    I/P:
+       - 'changed-configurations' is empty (does not have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 4096
+    O/P:
+       - Config values recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 7
+           - apptimelineserver_heapsize = 2048
+    '''
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '2048'
+    expected['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.app-cache-size'] = '7'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 3:
+    I/P:
+       - 'changed-configurations' is empty (does not have 'yarn.timeline-service.entity-group-fs-store.app-cache-size')
+       - 'host_mem' = 8192
+    O/P:
+       - Config values recommended for:
+           - yarn.timeline-service.entity-group-fs-store.app-cache-size = 10
+           - apptimelineserver_heapsize = 4096
+    '''
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 8192,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '4096'
+    expected['yarn-site']['properties']['yarn.timeline-service.entity-group-fs-store.app-cache-size'] = '10'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 4:
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 2048
+    O/P:
+       - Config value recommended for:
+           - apptimelineserver_heapsize = 4096
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "7"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+
+    '''
+    Test 5:
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 4096
+    O/P:
+       - Config value recommended for:
+           - apptimelineserver_heapsize = 2048
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "7"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 4096,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '2048'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
+
+    '''
+    Test 6:
+    I/P:
+       - 'changed-configurations' has 'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+       - 'host_mem' = 8196
+    O/P:
+       - Config value recommended for:
+           - Shouldn't have yarn.timeline-service.entity-group-fs-store.app-cache-size
+           - apptimelineserver_heapsize = 4572
+    '''
+
+    services["changed-configurations"] = [
+      {
+        u'old_value': u'10',
+        u'type': u'yarn-site',
+        u'name': u'yarn.timeline-service.entity-group-fs-store.app-cache-size'
+      }
+    ]
+
+    services["configurations"] = {
+      "yarn-env": {
+        "properties": {
+          "yarn_user" : "custom_yarn",
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.timeline-service.entity-group-fs-store.app-cache-size" : "3"
+        }
+      },
+      "ranger-yarn-plugin-properties": {
+        "properties": {
+          "ranger-yarn-plugin-enabled" : "Yes",
+          "REPOSITORY_CONFIG_USERNAME":"yarn"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 16392,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+
+    expected['yarn-env']['properties']['apptimelineserver_heapsize'] = '4572'
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations, expected)
+
+
   def test_recommendKAFKAConfigurations(self):
     configurations = {
       "kafka-env": {
diff --git a/ambari-server/src/test/python/unitTests.py b/ambari-server/src/test/python/unitTests.py
index a8e72e9..bff8959 100644
--- a/ambari-server/src/test/python/unitTests.py
+++ b/ambari-server/src/test/python/unitTests.py
@@ -125,7 +125,7 @@
   @param base_stack_folder: Path to stacks folder that does not include the version number. This can potentially be None.
   @param common_services_parent_dir: Path to the common-services directory for a specified service, not including the version.
   @param service: Service name
-  :return A list of paths to insert to sys.path by following the chain of inheritence.
+  :return A list of paths to insert to sys.path by following the chain of inheritance.
   """
   paths_to_import = []
 
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
index 77a832c..27f5902 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
@@ -23,7 +23,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.1.*</version>
+        <version>0.1</version>
       </stack>
     </min-stack-versions>
   </prerequisites>
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 04f733c..0d37b3e 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -20,11 +20,12 @@
     <active>true</active>
   </versions>
   <extends>0.1</extends>
+  <auto-link>true</auto-link>
   <prerequisites>
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2.*</version>
+        <version>0.2</version>
       </stack>
     </min-stack-versions>
   </prerequisites>
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
new file mode 100644
index 0000000..d827314
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>0.2</extends>
+  <auto-link>true</auto-link>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>0.2</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>
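
The 0.3 extension both extends 0.2 and declares auto-link, so, per the Ambari extensions design, it should be linked automatically to installed stack versions that satisfy the HDP 0.2 prerequisite. When auto-link is absent (as in EXT 0.1), a link can be created explicitly; below is a minimal sketch, assuming the /api/v1/links endpoint and the ExtensionLink payload shape described in the Ambari extensions documentation, with host and credentials as placeholders.

import json
import requests

def link_extension(base="http://localhost:8080", user="admin", password="admin"):
    # Hypothetical manual link of EXT 0.3 to HDP 0.3; the endpoint and
    # payload shape are taken from the Ambari extensions documentation.
    payload = {"ExtensionLink": {
        "stack_name": "HDP",
        "stack_version": "0.3",
        "extension_name": "EXT",
        "extension_version": "0.3",
    }}
    resp = requests.post(base + "/api/v1/links",
                         auth=(user, password),
                         headers={"X-Requested-By": "ambari"},
                         data=json.dumps(payload))
    resp.raise_for_status()
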
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
new file mode 100644
index 0000000..9176551
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE2</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+      <version>4.0.0</version>
+
+      <components>
+        <component>
+          <name>OOZIE2_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie2.noarch</name>
+            </package>
+            <package>
+              <name>oozie2-client.noarch</name>
+            </package>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie2-site</config-type>
+      </configuration-dependencies>
+
+      <themes>
+        <theme>
+          <fileName>broken_theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
new file mode 100644
index 0000000..6e8b5bf
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
@@ -0,0 +1,3 @@
+{
+  "configuration": {
+}
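
As the file name says, this theme fixture is deliberately malformed: the "configuration" object is opened but the outer brace is never closed, so any JSON parser rejects it, which is what the broken-theme handling tests rely on. A quick standard-library check:

import json

broken = '{\n  "configuration": {\n}'
try:
    json.loads(broken)
except json.JSONDecodeError as err:
    print("invalid theme JSON:", err)
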
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
index 8d506bf..037e39a 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
@@ -146,7 +146,7 @@
         </pre-upgrade>
         <pre-downgrade copy-upgrade="true" />
         <upgrade>
-          <task xsi:type="restart-task" />
+          <task xsi:type="restart-task" timeout-config="upgrade.parameter.zk-server.timeout"/>
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" id="hdp_2_1_1_zookeeper_new_config_type" />
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
index 4d4d972..b7027c5 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
@@ -78,7 +78,16 @@
       </service>
       <service-check>false</service-check>
     </group>
-    
+
+    <group xsi:type="cluster" name="HBASE" title="Update HBase Configuration">
+      <skippable>true</skippable>
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
+          <summary>Update HBase Env Configuration</summary>
+        </task>
+      </execute-stage>
+    </group>
+
     <group name="SERVICE_CHECK_1" title="Post-Master Service Checks" xsi:type="service-check">
       <priority>
         <service>HDFS</service>
@@ -108,6 +117,16 @@
         <message>Please run additional tests</message>
       </batch>
     </group>
+
+    <!-- This group will be ignored because its syntax is invalid: it is not a cluster-type group, yet it contains an execute-stage -->
+    <group name="HBASE" title="Update HBase Configuration">
+      <skippable>true</skippable>
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
+          <summary>Update HBase Env Configuration</summary>
+        </task>
+      </execute-stage>
+    </group>
     
     <group name="SERVICE_CHECK_2" title="Post-Slave Service Checks" xsi:type="service-check">
       <priority>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
new file mode 100644
index 0000000..b52857b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <upgrade>0.2</upgrade>
+    </versions>
+</metainfo>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>
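
For reference when working with fixtures like the one above, repoinfo.xml is plain XML and easy to inspect with the standard library; the sketch below simply lists each OS family's repo ids and base URLs (an empty baseurl means the mirrors list is used instead).

import xml.etree.ElementTree as ET

def list_repos(path):
    root = ET.parse(path).getroot()  # <reposinfo>
    for os_el in root.findall("os"):
        for repo in os_el.findall("repo"):
            print(os_el.get("family"),
                  repo.findtext("repoid"),
                  repo.findtext("baseurl") or "(mirrors list)")

# list_repos("repoinfo.xml") would print, e.g.:
#   redhat6 HDP-1.1.1.16 http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6
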
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode host.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This should be 1/8 of the maximum heap size (-Xmx); ensure namenode_opt_newsize is set accordingly.</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). This should be 1/8 of the maximum heap size (-Xmx); ensure namenode_opt_newsize is set accordingly.</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures during user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>
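
The content property above is a template rather than a literal script: the {{java_home}}-style placeholders and the {# ... #} comment are Jinja2 syntax. A minimal rendering sketch under that assumption follows (the values passed in are placeholders; Ambari performs its own substitution at deploy time):

from jinja2 import Template

fragment = ('export JAVA_HOME={{java_home}}\n'
            'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n')
print(Template(fragment).render(java_home="/usr/jdk64/jdk1.8.0_112",
                                hadoop_heapsize="1024"))
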
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose, in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and the base port where the dfs namenode
+      web ui will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+      Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be there on dfs.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for the users and groups who can view the default servlets in HDFS.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description></description>
+  </property>
+
+</configuration>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index e32084c..c22d71c 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -69,12 +69,25 @@
   upgradeState: 'INIT',
 
   /**
+   * Check if upgrade is in the INIT state.
+   * 'INIT' is the state before an upgrade starts and after it has finished.
+   * @type {boolean}
+   */
+  upgradeInit: Em.computed.equal('upgradeState', 'INIT'),
+
+  /**
    * flag is true when upgrade process is running
    * @returns {boolean}
    */
   upgradeInProgress: Em.computed.equal('upgradeState', 'IN_PROGRESS'),
 
   /**
+   * Checks if the upgrade process is completed
+   * @type {boolean}
+   */
+  upgradeCompleted: Em.computed.equal('upgradeState', 'COMPLETED'),
+
+  /**
    * flag is true when upgrade process is waiting for user action
    * to proceed, retry, perform manual steps etc.
    * @returns {boolean}
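
Review note: the three flags above are plain equality computeds over upgradeState. A minimal sketch of how they behave, using a throwaway Ember object rather than App itself:

    var state = Em.Object.extend({
      upgradeState: 'INIT',
      upgradeInit: Em.computed.equal('upgradeState', 'INIT'),
      upgradeInProgress: Em.computed.equal('upgradeState', 'IN_PROGRESS'),
      upgradeCompleted: Em.computed.equal('upgradeState', 'COMPLETED')
    }).create();

    state.get('upgradeInit');      // true
    state.set('upgradeState', 'COMPLETED');
    state.get('upgradeCompleted'); // true
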
diff --git a/ambari-web/app/controllers/global/background_operations_controller.js b/ambari-web/app/controllers/global/background_operations_controller.js
index d833661..de420b5 100644
--- a/ambari-web/app/controllers/global/background_operations_controller.js
+++ b/ambari-web/app/controllers/global/background_operations_controller.js
@@ -217,7 +217,7 @@
         return;
       }
       var rq = this.get("services").findProperty('id', request.Requests.id);
-      var isRunning = this.isRequestRunning(request);
+      var isRunning = request.Requests.request_status === 'IN_PROGRESS';
       var requestParams = this.parseRequestContext(request.Requests.request_context);
       this.assignScheduleId(request, requestParams);
       currentRequestIds.push(request.Requests.id);
@@ -282,16 +282,6 @@
   },
 
   /**
-   * identify whether request is running by task counters
-   * @param request
-   * @return {Boolean}
-   */
-  isRequestRunning: function (request) {
-    return (request.Requests.task_count -
-      (request.Requests.aborted_task_count + request.Requests.completed_task_count + request.Requests.failed_task_count
-        + request.Requests.timed_out_task_count - request.Requests.queued_task_count)) > 0;
-  },
-  /**
    * identify whether there is only one host in request
    * @param inputs
    * @return {Boolean}
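
Review note: the hunk above swaps the counter-based heuristic for the server-reported status field. Side by side, the two checks (field names exactly as in the request payload):

    // Old heuristic: infer "running" from task counters.
    function isRequestRunningByCounters(request) {
      var r = request.Requests;
      return (r.task_count -
        (r.aborted_task_count + r.completed_task_count + r.failed_task_count
          + r.timed_out_task_count - r.queued_task_count)) > 0;
    }

    // New check: trust the request_status reported by the server.
    function isRequestRunningByStatus(request) {
      return request.Requests.request_status === 'IN_PROGRESS';
    }
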
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index 369e163..db9623a 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -203,24 +203,9 @@
 
     for (var hostName in rawHosts) {
       var host = rawHosts[hostName];
-      var disksOverallCapacity = 0;
-      var diskFree = 0;
-      host.disk_info.forEach(function (disk) {
-        disksOverallCapacity += parseFloat(disk.size);
-        diskFree += parseFloat(disk.available);
-      });
       hosts.pushObject(Em.Object.create({
           id: host.name,
-          ip: host.ip,
-          osType: host.os_type,
-          osArch: host.os_arch,
           hostName: host.name,
-          publicHostName: host.name,
-          cpu: host.cpu,
-          memory: host.memory,
-          diskInfo: host.disk_info,
-          diskTotal: disksOverallCapacity / (1024 * 1024),
-          diskFree: diskFree / (1024 * 1024),
           hostComponents: host.hostComponents || []
         }
       ))
@@ -707,6 +692,7 @@
    */
   getSupportedOSListSuccessCallback: function (response, request, data) {
     var self = this;
+    var stack_default = data.versionDefinition.VersionDefinition.stack_default;
     var existedOS = data.versionDefinition.operating_systems;
     var existedMap = {};
     existedOS.map(function (existedOS) {
@@ -720,8 +706,7 @@
           repo.Repositories.base_url = '';
         });
         existedOS.push(supportedOS);
-      }
-      if(existedMap[supportedOS.OperatingSystems.os_type]) {
+      } else if (stack_default) { // only overwrite if it is stack default, otherwise use url from /version_definition
         existedMap[supportedOS.OperatingSystems.os_type].repositories.forEach(function (repo) {
           supportedOS.repositories.forEach(function (supportedRepo) {
             if (supportedRepo.Repositories.repo_id == repo.Repositories.repo_id) {
@@ -754,16 +739,6 @@
           this.setSelected(data.stackInfo.isStacksExistInDb);
         }
       }
-      // log diagnosis data for abnormal number of repos
-      var post_diagnosis = false;
-      data.versionDefinition.operating_systems.map(function(item) {
-        if (item.repositories.length > 2) {
-          post_diagnosis = true;
-        }
-      });
-      if (post_diagnosis) {
-        this.postUserPref('stack_response_diagnosis', data);
-      }
     }
   },
 
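
Review note: the reworked branch only overwrites repository base URLs when the loaded version definition is the stack default; otherwise the URL returned by /version_definition wins. A sketch of the rule (function and parameter names are illustrative, not from the codebase):

    function resolveBaseUrl(stackDefault, vdfUrl, supportedUrl) {
      // Only stack-default definitions fall back to the supported repo URL.
      return stackDefault ? supportedUrl : vdfUrl;
    }
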
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
index b54986b..197596c 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step1_controller.js
@@ -105,7 +105,7 @@
     self.get('addableComponents').forEach(function (componentName) {
       self.updateComponent(componentName);
     }, self);
-    self.set('isLoaded', true);
+    self.set('isRecommendationsLoaded', true);
   },
 
   /**
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 238b4bc..0f2efb0 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -70,6 +70,11 @@
 
   /**
    * @type {boolean}
+   */
+  showPauseButton: Em.computed.and('!App.upgradeSuspended', '!App.upgradeCompleted', '!App.upgradeInit'),
+
+  /**
+   * @type {boolean}
    * @default true
    */
   downgradeAllowed: true,
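
Review note: showPauseButton appears to lean on Ambari's extended computed macros, which accept negated ('!') and App-scoped dependent keys; stock Em.computed.and does not. Expanded into a plain computed property it reads as:

    showPauseButton: function () {
      return !App.get('upgradeSuspended')
          && !App.get('upgradeCompleted')
          && !App.get('upgradeInit');
    }.property('App.upgradeSuspended', 'App.upgradeCompleted', 'App.upgradeInit')
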
diff --git a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
index 73c19c6..df15513 100644
--- a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
+++ b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
@@ -149,6 +149,11 @@
       value: '',
       defaultValue: ''
     },
+    scriptDispatchProperty: {
+      label: Em.I18n.t('alerts.actions.manage_alert_notifications_popup.scriptDispatchProperty'),
+      value: '',
+      defaultValue: ''
+    },
     customProperties: Em.A([])
   }),
 
@@ -167,7 +172,7 @@
    * used in Type combobox
    * @type {Array}
    */
-  methods: ['EMAIL', 'SNMP', 'Custom SNMP'],
+  methods: ['EMAIL', 'SNMP', 'Custom SNMP', 'Alert Script'],
 
   /**
    * List of available value for Severity Filter
@@ -283,7 +288,8 @@
     'mail.smtp.from',
     'mail.smtp.host',
     'mail.smtp.port',
-    'mail.smtp.starttls.enable'
+    'mail.smtp.starttls.enable',
+    'ambari.dispatch-property.script'
   ],
 
   validationMap: {
@@ -332,7 +338,8 @@
         errorKey: 'hostError',
         validator: 'hostsValidation'
       }
-    ]
+    ],
+    AlertScript:[]
   },
 
   /**
@@ -427,6 +434,7 @@
     inputFields.set('severityFilter.value', selectedAlertNotification.get('alertStates'));
     inputFields.set('global.value', selectedAlertNotification.get('global'));
     inputFields.set('allGroups.value', selectedAlertNotification.get('global') ? 'all' : 'custom');
+    inputFields.set('scriptDispatchProperty.value', properties['ambari.dispatch-property.script'] || '');
     // not allow to edit global field
     inputFields.set('global.disabled', true);
     inputFields.set('description.value', selectedAlertNotification.get('description'));
@@ -478,6 +486,8 @@
 
         isCustomSNMPMethodSelected: Em.computed.equal('controller.inputFields.method.value', 'Custom SNMP'),
 
+        isAlertScriptMethodSelected: Em.computed.equal('controller.inputFields.method.value', 'Alert Script'),
+
         methodObserver: function () {
           var currentMethod = this.get('controller.inputFields.method.value'),
             validationMap = self.get('validationMap');
@@ -557,7 +567,7 @@
         hostsValidation: function() {
           var inputValue = this.get('controller.inputFields.host.value').trim(),
             hostError = false;
-          if (!this.get('isEmailMethodSelected')) {
+          if (!this.get('isEmailMethodSelected') && !this.get('isAlertScriptMethodSelected')) {
             var array = inputValue.split(',');
             hostError = array.some(function(hostname) {
               return hostname && !validator.isHostname(hostname.trim());
@@ -757,7 +767,7 @@
       properties['ambari.dispatch.snmp.community'] = inputFields.get('community.value');
       properties['ambari.dispatch.recipients'] = inputFields.get('host.value').replace(/\s/g, '').split(',');
       properties['ambari.dispatch.snmp.port'] = inputFields.get('port.value');
-    } else {
+    } else if (inputFields.get('method.value') === 'Custom SNMP') {
       properties['ambari.dispatch.snmp.version'] = inputFields.get('version.value');
       properties['ambari.dispatch.snmp.oids.trap'] = inputFields.get('OIDs.value');
       properties['ambari.dispatch.snmp.oids.subject'] = inputFields.get('OIDs.value');
@@ -765,6 +775,10 @@
       properties['ambari.dispatch.snmp.community'] = inputFields.get('community.value');
       properties['ambari.dispatch.recipients'] = inputFields.get('host.value').replace(/\s/g, '').split(',');
       properties['ambari.dispatch.snmp.port'] = inputFields.get('port.value');
+    } else if (inputFields.get('method.value') === 'Alert Script') {
+      var scriptDispatchProperty = inputFields.get('scriptDispatchProperty.value').trim();
+      if (scriptDispatchProperty !== '') {
+        properties['ambari.dispatch-property.script'] = scriptDispatchProperty;
+      }
     }
     inputFields.get('customProperties').forEach(function (customProperty) {
       properties[customProperty.name] = customProperty.value;
@@ -791,6 +805,8 @@
       notificationType = "SNMP";
     } else if(notificationType === "SNMP") {
       notificationType = "AMBARI_SNMP";
+    } else if(notificationType === "Alert Script"){
+      notificationType = "ALERT_SCRIPT";
     }
     return notificationType;
   },
@@ -801,6 +817,8 @@
       notificationTypeText = "Custom SNMP";
     } else if(notificationType === "AMBARI_SNMP") {
       notificationTypeText = "SNMP";
+    } else if(notificationType === "ALERT_SCRIPT"){
+      notificationTypeText = "Alert Script";
     }
     return notificationTypeText;
   },
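
Review note: taken together, the Alert Script changes add a fourth dispatch method that round-trips as ALERT_SCRIPT and carries at most one dispatch property. A saved target would look roughly like this (all values hypothetical):

    var target = {
      AlertTarget: {
        name: 'ops-script',                 // user-supplied name
        notification_type: 'ALERT_SCRIPT',
        properties: {
          'ambari.dispatch-property.script': 'notification.script'  // optional
        }
      }
    };
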
diff --git a/ambari-web/app/controllers/main/service/add_controller.js b/ambari-web/app/controllers/main/service/add_controller.js
index ee7719c..c11bcf2 100644
--- a/ambari-web/app/controllers/main/service/add_controller.js
+++ b/ambari-web/app/controllers/main/service/add_controller.js
@@ -123,6 +123,9 @@
           var self = this;
           var dfd = $.Deferred();
           this.load('cluster');
+          this.set('content.additionalClients', []);
+          this.set('installClientQueueLength', 0);
+          this.set('installClietsQueue', App.ajaxQueue.create({abortOnError: false}));
           this.loadKerberosDescriptorConfigs().done(function() {
             self.loadServiceConfigGroups();
             self.loadConfigThemes().then(function() {
diff --git a/ambari-web/app/controllers/main/service/info/summary.js b/ambari-web/app/controllers/main/service/info/summary.js
index 1554c61..d7fff18 100644
--- a/ambari-web/app/controllers/main/service/info/summary.js
+++ b/ambari-web/app/controllers/main/service/info/summary.js
@@ -369,7 +369,7 @@
     return App.ModalPopup.show({
       header: Em.I18n.t('services.service.summary.alerts.popup.header').format(context.get('displayName')),
       autoHeight: false,
-      classNames: ['sixty-percent-width-modal'],
+      classNames: ['sixty-percent-width-modal', 'service-alerts-popup'],
       bodyClass: Em.View.extend({
         templateName: require('templates/main/service/info/service_alert_popup'),
         classNames: ['service-alerts'],
diff --git a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
index 45c840a..a260652 100644
--- a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
+++ b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
@@ -289,15 +289,13 @@
    */
   loadHosts: function() {
     this.set('isLoaded', false);
-    if (this.get('isInstaller')) {
-      var allHosts = this.get('isAddService') ? App.router.get('addServiceController').get('allHosts') : App.router.get('installerController').get('allHosts');
-      this.set('clusterHosts', allHosts);
-      this.loadConfigGroups(this.get('serviceName'));
-    }
-    else {
+    if (this.get('isInstaller') && !this.get('isAddService')) {
+      var hostNames = App.router.get('installerController').get('allHosts').mapProperty('hostName').join();
+      this.loadInstallerHostsFromServer(hostNames);
+    } else {
       this.loadHostsFromServer();
-      this.loadConfigGroups(this.get('serviceName'));
     }
+    this.loadConfigGroups(this.get('serviceName'));
   },
 
   /**
@@ -1000,6 +998,83 @@
         this.set('disablePrimary', !modified);
       }.observes('subViewController.isHostsModified')
     });
+  },
+
+  loadInstallerHostsFromServer: function (hostNames) {
+    return App.ajax.send({
+      name: 'hosts.info.install',
+      sender: this,
+      data: {
+        hostNames: hostNames
+      },
+      success: 'loadInstallerHostsSuccessCallback'
+    });
+  },
+
+  loadInstallerHostsSuccessCallback: function (data) {
+    var rawHosts = App.router.get('installerController.content.hosts'),
+      masterComponents = App.router.get('installerController.content.masterComponentHosts'),
+      slaveComponents = App.router.get('installerController.content.slaveComponentHosts'),
+      hosts = [];
+    masterComponents.forEach(function (component) {
+      var host = rawHosts[component.hostName];
+      if (host.hostComponents) {
+        host.hostComponents.push(Em.Object.create({
+          componentName: component.component,
+          displayName: component.display_name
+        }));
+      } else {
+        rawHosts[component.hostName].hostComponents = [
+          Em.Object.create({
+            componentName: component.component,
+            displayName: component.display_name
+          })
+        ]
+      }
+    });
+    slaveComponents.forEach(function (component) {
+      component.hosts.forEach(function (rawHost) {
+        var host = rawHosts[rawHost.hostName];
+        if (host.hostComponents) {
+          host.hostComponents.push(Em.Object.create({
+            componentName: component.componentName,
+            displayName: component.displayName
+          }));
+        } else {
+          rawHosts[rawHost.hostName].hostComponents = [
+            Em.Object.create({
+              componentName: component.componentName,
+              displayName: component.displayName
+            })
+          ]
+        }
+      });
+    });
+
+    data.items.forEach(function (host) {
+      var disksOverallCapacity = 0,
+        diskFree = 0;
+      host.Hosts.disk_info.forEach(function (disk) {
+        disksOverallCapacity += parseFloat(disk.size);
+        diskFree += parseFloat(disk.available);
+      });
+      hosts.pushObject(Em.Object.create({
+        id: host.Hosts.host_name,
+        ip: host.Hosts.ip,
+        osType: host.Hosts.os_type,
+        osArch: host.Hosts.os_arch,
+        hostName: host.Hosts.host_name,
+        publicHostName: host.Hosts.public_host_name,
+        cpu: host.Hosts.cpu_count,
+        memory: host.Hosts.total_mem.toFixed(2),
+        diskInfo: host.Hosts.disk_info,
+        diskTotal: disksOverallCapacity / (1024 * 1024),
+        diskFree: diskFree / (1024 * 1024),
+        hostComponents: (rawHosts[host.Hosts.host_name] && rawHosts[host.Hosts.host_name].hostComponents) || []
+      }));
+    });
+
+    this.set('clusterHosts', hosts);
   }
 
 });
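
Review note: loadInstallerHostsSuccessCallback flattens each API item into a plain host record; one pushed entry looks roughly like this (all values hypothetical):

    Em.Object.create({
      id: 'c6401.ambari.apache.org',
      hostName: 'c6401.ambari.apache.org',
      publicHostName: 'c6401.ambari.apache.org',
      ip: '192.168.64.101',
      osType: 'centos7',
      osArch: 'x86_64',
      cpu: 4,
      memory: '8096.00',   // total_mem.toFixed(2) yields a string
      diskTotal: 95.5,     // summed disk sizes / (1024 * 1024)
      diskFree: 60.2,
      diskInfo: [],        // raw disk_info array from the API
      hostComponents: []   // Em.Objects with componentName/displayName
    });
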
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index c3a54cf..efda62d 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -114,39 +114,21 @@
   allHosts: function () {
     var dbHosts = this.get('content.hosts');
     var hosts = [];
-    var hostComponents = [];
 
     for (var hostName in dbHosts) {
-      hostComponents = [];
-      var disksOverallCapacity = 0;
-      var diskFree = 0;
+      var hostComponents = [];
       dbHosts[hostName].hostComponents.forEach(function (componentName) {
         hostComponents.push(Em.Object.create({
           componentName: componentName,
           displayName: App.format.role(componentName, false)
         }));
       });
-      dbHosts[hostName].disk_info.forEach(function (disk) {
-        disksOverallCapacity += parseFloat(disk.size);
-        diskFree += parseFloat(disk.available);
-      });
 
       hosts.push(Em.Object.create({
         id: hostName,
         hostName: hostName,
-        publicHostName: hostName,
-        diskInfo: dbHosts[hostName].disk_info,
-        diskTotal: disksOverallCapacity / (1024 * 1024),
-        diskFree: diskFree / (1024 * 1024),
-        disksMounted: dbHosts[hostName].disk_info.length,
-        cpu: dbHosts[hostName].cpu,
-        memory: dbHosts[hostName].memory,
-        osType: dbHosts[hostName].osType ? dbHosts[hostName].osType: 0,
-        osArch: dbHosts[hostName].osArch ? dbHosts[hostName].osArch : 0,
-        ip: dbHosts[hostName].ip ? dbHosts[hostName].ip: 0,
-        hostComponents: hostComponents,
-        maintenanceState: dbHosts[hostName].maintenance_state
-      }))
+        hostComponents: hostComponents
+      }));
     }
     return hosts;
   }.property('content.hosts'),
@@ -771,13 +753,6 @@
       if (_host.bootStatus === 'REGISTERED') {
         hosts[_host.name] = {
           name: _host.name,
-          cpu: _host.cpu,
-          memory: _host.memory,
-          disk_info: _host.disk_info,
-          os_type: _host.os_type,
-          os_arch: _host.os_arch,
-          ip: _host.ip,
-          maintenance_state: _host.maintenance_state,
           bootStatus: _host.bootStatus,
           isInstalled: false,
           id: indx++
@@ -1031,16 +1006,16 @@
           })
         });
         //configGroup copied into plain JS object to avoid Converting circular structure to JSON
-        var hostNames = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
+        var hostIds = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
         serviceConfigGroups.push({
           id: configGroup.get('id'),
           name: configGroup.get('name'),
           description: configGroup.get('description'),
-          hosts: hostNames.slice(),
+          hosts: hostIds.slice(),
           properties: properties.slice(),
           is_default: configGroup.get('isDefault'),
           is_for_installed_service: isForInstalledService,
-          is_for_update: configGroup.isForUpdate || configGroup.get('hash') != this.getConfigGroupHash(configGroup, hostNames),
+          is_for_update: configGroup.get('isForUpdate') || configGroup.get('hash') !== this.getConfigGroupHash(configGroup, configGroup.get('hosts')),
           service_name: configGroup.get('serviceName'),
           service_id: configGroup.get('serviceName'),
           desired_configs: configGroup.get('desiredConfigs'),
@@ -1308,13 +1283,6 @@
     response.items.forEach(function (item, indx) {
       installedHosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
-        osType: item.Hosts.os_type,
-        osArch: item.Hosts.os_arch,
-        ip: item.Hosts.ip,
-        maintenance_state: item.Hosts.maintenance_state,
         bootStatus: "REGISTERED",
         isInstalled: true,
         hostComponents: item.host_components,
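
Review note: after this change the wizard persists only a slim host record; hardware details are fetched on demand (as in loadInstallerHostsFromServer above). The allHosts entry reduces to:

    // Resulting record in allHosts (hardware fields intentionally gone):
    Em.Object.create({
      id: hostName,
      hostName: hostName,
      hostComponents: hostComponents  // [{componentName, displayName}, ...]
    });
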
diff --git a/ambari-web/app/controllers/wizard/step1_controller.js b/ambari-web/app/controllers/wizard/step1_controller.js
index 2ac295b..22456af 100644
--- a/ambari-web/app/controllers/wizard/step1_controller.js
+++ b/ambari-web/app/controllers/wizard/step1_controller.js
@@ -63,6 +63,13 @@
    */
   networkIssuesExist: Em.computed.everyBy('content.stacks', 'stackDefault', true),
 
+  /**
+   * True if at least one stack has the repo update URL section (aka "latest") defined in repoinfo.xml
+   *
+   * @type {boolean}
+   */
+  stackRepoUpdateLinkExists: Em.computed.someBy('content.stacks', 'stackRepoUpdateLinkExists', true),
+
   optionsToSelect: {
     'usePublicRepo': {
       index: 0,
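
Review note: Em.computed.someBy flips to true as soon as any stack carries the flag, e.g.:

    // With these stacks, stackRepoUpdateLinkExists === true:
    var stacks = [
      Em.Object.create({stackRepoUpdateLinkExists: false}),
      Em.Object.create({stackRepoUpdateLinkExists: true})
    ];
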
diff --git a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
index 8bdab28..e8eaf47 100644
--- a/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
+++ b/ambari-web/app/controllers/wizard/step7/assign_master_controller.js
@@ -65,7 +65,7 @@
     
     switch (action) {
       case 'ADD':
-        if (hostComponent.componentName == "HIVE_SERVER_INTERACTIVE") {
+        if (hostComponent.componentName === "HIVE_SERVER_INTERACTIVE") {
           this.getPendingBatchRequests(hostComponent);  
         } else {
           this.showPopup(hostComponent);
@@ -94,9 +94,9 @@
 
   pendingBatchRequestsAjaxError: function(data) {
     var error = Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error');
-    if(data && data.responseText){
+    if (data && data.responseText) {
       try {
-        var json = $.parseJSON(data.responseText);
+        var json = JSON.parse(data.responseText);
         error += json.message;
       } catch (err) {}
     }
@@ -105,21 +105,11 @@
 
   pendingBatchRequestsAjaxSuccess : function(data, opt, params) {
     var self = this;
-    var showAlert = false;
-    if (data.hasOwnProperty('items') && data.items.length > 0) {
-      data.items.forEach( function(_item) {
-        _item.RequestSchedule.batch.batch_requests.forEach( function(batchRequest) {
-          // Check if a DELETE request on HIVE_SERVER_INTERACTIVE is in progress
-          if (batchRequest.request_type == "DELETE" && batchRequest.request_uri.indexOf("HIVE_SERVER_INTERACTIVE") > -1) {
-            showAlert = true;
-          }
-        });
-      });
-    }
-    if (showAlert) {
-      App.showAlertPopup(Em.I18n.t('services.service.actions.hsi.alertPopup.header'), Em.I18n.t('services.service.actions.hsi.alertPopup.body'), function() {
+    if (this.shouldShowAlertOnBatchRequest(data)) {
+      App.showAlertPopup(Em.I18n.t('services.service.actions.hsi.alertPopup.header'),
+        Em.I18n.t('services.service.actions.hsi.alertPopup.body'), function() {
         var configWidgetContext = self.get('configWidgetContext');
-        var config = self.get('configWidgetContext.config');
+        var config = configWidgetContext.get('config');
         configWidgetContext.toggleProperty('controller.forceUpdateBoundaries');
         var value = config.get('initialValue');
         config.set('value', value);
@@ -131,6 +121,21 @@
       this.showPopup(params.hostComponent);
     }
   },
+
+  shouldShowAlertOnBatchRequest: function(data) {
+    var showAlert = false;
+    if (data.hasOwnProperty('items') && data.items.length > 0) {
+      data.items.forEach( function(_item) {
+        _item.RequestSchedule.batch.batch_requests.forEach( function(batchRequest) {
+          // Check if a DELETE request on HIVE_SERVER_INTERACTIVE is in progress
+          if (batchRequest.request_type === "DELETE" && batchRequest.request_uri.indexOf("HIVE_SERVER_INTERACTIVE") > -1) {
+            showAlert = true;
+          }
+        });
+      });
+    }
+    return showAlert;
+  },
   
   showPopup: function(hostComponent) {
     var missingDependentServices = this.getAllMissingDependentServices();
@@ -309,6 +314,7 @@
 
       this.set("hosts", result);
       this.sortHosts(result);
+      this.set('isHostsLoaded', true);
     }
   },
 
@@ -461,7 +467,6 @@
       var configActionComponent = self.get('configActionComponent');
       var componentHostName = self.getSelectedHostName(configActionComponent.componentName);
       var config = self.get('configWidgetContext.config');
-      var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
 
       // TODO remove after stack advisor is able to handle this case
       // workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive from Install Wizard
@@ -484,114 +489,172 @@
       configActionComponent.hostName = componentHostName;
       config.set('configActionComponent', configActionComponent);
       /* TODO uncomment after stack advisor is able to handle this case
-      context.get('controller').loadConfigRecommendations([{
+       var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
+       context.get('controller').loadConfigRecommendations([{
         type: App.config.getConfigTagFromFileName(config.get('fileName')),
         name: config.get('name'),
         old_value: config.get(oldValueKey)
       }]);
       */
+      self.resolveDependencies(dependencies, serviceConfigs, context);
+    });
+  },
 
-      // TODO remove after stack advisor is able to handle this case
-      // workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive
-      if (dependencies) {
-        var foreignKeys = {};
-        if (dependencies.foreignKeys) {
-          dependencies.foreignKeys.forEach(function (dependency) {
-            var matchingProperty = serviceConfigs.find(function (property) {
-              return property.get('filename') === App.config.getOriginalFileName(dependency.fileName) && property.get('name') === dependency.propertyName;
+  /**
+   * TODO remove after stack advisor is able to handle this case
+   * workaround for hadoop.proxyuser.{{hiveUser}}.hosts after adding Hive Server Interactive
+   * @param {object} dependencies
+   * @param {array} serviceConfigs
+   * @param {Em.Object} context
+   */
+  resolveDependencies: function(dependencies, serviceConfigs, context) {
+    if (dependencies) {
+      var foreignKeys = this.getDependenciesForeignKeys(dependencies, serviceConfigs);
+
+      if (dependencies.properties && dependencies.initializer) {
+        var initializer = App.get(dependencies.initializer.name);
+        var setup = Em.getProperties(foreignKeys, dependencies.initializer.setupKeys);
+        initializer.setup(setup);
+        var blueprintObject = {};
+        dependencies.properties.forEach(function (property) {
+          var propertyObject = Em.getProperties(property, ['name', 'fileName']);
+          if (property.nameTemplate) {
+            var name = property.nameTemplate;
+            Em.keys(foreignKeys).forEach(function (key) {
+              name = name.replace('{{' + key + '}}', foreignKeys[key]);
             });
-            if (matchingProperty) {
-              foreignKeys[dependency.key] = matchingProperty.get('value');
-            }
+            propertyObject.name = name;
+          }
+          if (!blueprintObject[property.fileName]) {
+            blueprintObject[property.fileName] = {
+              properties: {}
+            };
+          }
+          var result = initializer.initialValue(propertyObject, {
+            masterComponentHosts: this.getMasterComponents(dependencies, context)
           });
-        }
-        if (dependencies.properties && dependencies.initializer) {
-          var initializer = App.get(dependencies.initializer.name);
-          var setup = Em.getProperties(foreignKeys, dependencies.initializer.setupKeys);
-          initializer.setup(setup);
-          var blueprintObject = {};
-          dependencies.properties.forEach(function (property) {
-            var propertyObject = Em.getProperties(property, ['name', 'fileName']);
-            if (property.nameTemplate) {
-              var name = property.nameTemplate;
-              Em.keys(foreignKeys).forEach(function (key) {
-                name = name.replace('{{' + key + '}}', foreignKeys[key]);
+
+          var propertiesMap = blueprintObject[propertyObject.fileName].properties;
+          propertiesMap[propertyObject.name] = result.value;
+
+          if (property.isHostsList) {
+            var service = App.config.get('serviceByConfigTypeMap')[propertyObject.fileName];
+            if (service) {
+              var serviceName = service.get('serviceName');
+              var configs = serviceName === context.get('controller.selectedService.serviceName') ? serviceConfigs :
+                context.get('controller.stepConfigs').findProperty('serviceName', serviceName).get('configs');
+              var originalFileName = App.config.getOriginalFileName(propertyObject.fileName);
+              var currentProperty = configs.find(function (configProperty) {
+                return configProperty.get('filename') === originalFileName && configProperty.get('name') === propertyObject.name;
               });
-              propertyObject.name = name;
-            }
-            if (!blueprintObject[property.fileName]) {
-              blueprintObject[property.fileName] = {
-                properties: {}
-              };
-            }
-            var masterComponents = [];
-            if (self.get('content.controllerName')) {
-              var savedMasterComponents = context.get('controller.content.masterComponentHosts').filter(function (componentObject) {
-                return dependencies.initializer.componentNames.contains(componentObject.component);
-              });
-              masterComponents = savedMasterComponents.map(function (componentObject) {
-                var masterComponent = Em.getProperties(componentObject, ['component', 'hostName']);
-                masterComponent.isInstalled = true;
-                return masterComponent;
-              });
-            } else {
-              var hostsMap = blueprintUtils.getComponentForHosts();
-              Em.keys(hostsMap).forEach(function (hostName) {
-                hostsMap[hostName].forEach(function (componentName) {
-                  if (dependencies.initializer.componentNames.contains(componentName)) {
-                    masterComponents.push({
-                      component: componentName,
-                      hostName: hostName,
-                      isInstalled: true
-                    });
-                  }
-                });
-              });
-            }
-            var result = initializer.initialValue(propertyObject, {
-              masterComponentHosts: masterComponents
-            });
-            var propertiesMap = blueprintObject[propertyObject.fileName].properties;
-            propertiesMap[propertyObject.name] = result.value;
-            if (property.isHostsList) {
-              var service = App.config.get('serviceByConfigTypeMap')[propertyObject.fileName];
-              if (service) {
-                var serviceName = service.get('serviceName');
-                var configs = serviceName === context.get('controller.selectedService.serviceName') ? serviceConfigs :
-                  context.get('controller.stepConfigs').findProperty('serviceName', serviceName).get('configs');
-                var originalFileName = App.config.getOriginalFileName(propertyObject.fileName);
-                var currentProperty = configs.find(function (configProperty) {
-                  return configProperty.get('filename') === originalFileName && configProperty.get('name') === propertyObject.name;
-                });
-                if (currentProperty) {
-                  propertiesMap[propertyObject.name] = currentProperty.get('value');
-                  App.config.updateHostsListValue(propertiesMap, propertyObject.fileName, propertyObject.name, propertyObject.value, property.isHostsArray);
-                }
+              if (currentProperty) {
+                propertiesMap[propertyObject.name] = currentProperty.get('value');
+                App.config.updateHostsListValue(propertiesMap, propertyObject.fileName, propertyObject.name, propertyObject.value, property.isHostsArray);
               }
             }
-            context.get('controller').loadRecommendationsSuccess({
-              resources: [
-                {
-                  recommendations: {
-                    blueprint: {
-                      configurations: blueprintObject
-                    }
-                  }
-                }
-              ]
-            }, null, {
-              dataToSend: {
-                changed_configurations: [{
-                  type: App.config.getConfigTagFromFileName(config.get('fileName')),
-                  name: config.get('name'),
-                  old_value: config.get(oldValueKey)
-                }]
-              }
-            });
-            initializer.cleanup();
-          });
+          }
+          this.saveRecommendations(context, blueprintObject);
+          initializer.cleanup();
+        }, this);
+      }
+    }
+  },
+
+  /**
+   *
+   * @param {Em.Object} context
+   * @param {object} blueprintObject
+   */
+  saveRecommendations: function(context, blueprintObject) {
+    var oldValueKey = context.get('controller.wizardController.name') === 'installerController' ? 'initialValue' : 'savedValue';
+    var config = this.get('configWidgetContext.config');
+
+    context.get('controller').loadRecommendationsSuccess({
+      resources: [
+        {
+          recommendations: {
+            blueprint: {
+              configurations: blueprintObject
+            }
+          }
         }
+      ]
+    }, null, {
+      dataToSend: {
+        changed_configurations: [{
+          type: App.config.getConfigTagFromFileName(config.get('fileName')),
+          name: config.get('name'),
+          old_value: config.get(oldValueKey)
+        }]
       }
     });
-  }
+  },
+
+  /**
+   *
+   * @param dependencies
+   * @param serviceConfigs
+   * @returns {{}}
+   */
+  getDependenciesForeignKeys: function(dependencies, serviceConfigs) {
+    var foreignKeys = {};
+    if (dependencies.foreignKeys) {
+      dependencies.foreignKeys.forEach(function (dependency) {
+        var matchingProperty = serviceConfigs.find(function (property) {
+          return property.get('filename') === App.config.getOriginalFileName(dependency.fileName) && property.get('name') === dependency.propertyName;
+        });
+        if (matchingProperty) {
+          foreignKeys[dependency.key] = matchingProperty.get('value');
+        }
+      });
+    }
+    return foreignKeys;
+  },
+
+  /**
+   *
+   * @param dependencies
+   * @param context
+   * @returns {Array}
+   */
+  getMasterComponents: function(dependencies, context) {
+    var masterComponents = [];
+    if (this.get('content.controllerName')) {
+      var savedMasterComponents = context.get('controller.content.masterComponentHosts').filter(function (componentObject) {
+        return dependencies.initializer.componentNames.contains(componentObject.component);
+      });
+      masterComponents = savedMasterComponents.map(function (componentObject) {
+        var masterComponent = Em.getProperties(componentObject, ['component', 'hostName']);
+        masterComponent.isInstalled = true;
+        return masterComponent;
+      });
+    } else {
+      var hostsMap = blueprintUtils.getComponentForHosts();
+      Em.keys(hostsMap).forEach(function (hostName) {
+        hostsMap[hostName].forEach(function (componentName) {
+          if (dependencies.initializer.componentNames.contains(componentName)) {
+            masterComponents.push({
+              component: componentName,
+              hostName: hostName,
+              isInstalled: true
+            });
+          }
+        });
+      });
+    }
+    return masterComponents;
+  },
+
+  getHosts: function () {
+    var result,
+      parentController = this.get('content.controllerName');
+    if (parentController) {
+      result = this._super();
+    } else {
+      result = this.get('hosts').mapProperty('host_name');
+    }
+    return result;
+  },
+
+  clearStepOnExit: Em.K
 });
\ No newline at end of file
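
Review note: shouldShowAlertOnBatchRequest walks items[].RequestSchedule.batch.batch_requests[] looking for an in-flight DELETE on HIVE_SERVER_INTERACTIVE. A minimal payload that trips it (URI value hypothetical):

    var data = {
      items: [{
        RequestSchedule: {
          batch: {
            batch_requests: [{
              request_type: 'DELETE',
              request_uri: '.../host_components/HIVE_SERVER_INTERACTIVE'
            }]
          }
        }
      }]
    };
    // shouldShowAlertOnBatchRequest(data) === true
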
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 8e14b70..6685c01 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -1186,6 +1186,7 @@
               this.get('stepConfigs').findProperty('serviceName', service.serviceName).get('configs').pushObject(overriddenSCP);
             }
           }, this);
+          modelGroup.set('hash', this.get('wizardController').getConfigGroupHash(modelGroup));
         }, this);
         service.set('configGroups', App.ServiceConfigGroup.find().filterProperty('serviceName', service.get('serviceName')));
       }
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index 4678d03..7e318e0 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -18,6 +18,7 @@
 
 var App = require('app');
 var stringUtils = require('utils/string_utils');
+var fileUtils = require('utils/file_utils');
 
 App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wizardDeployProgressControllerMixin, App.ConfigOverridable, App.ConfigsSaverMixin, {
 
@@ -1764,7 +1765,171 @@
     });
   },
 
+  getComponentsForHost: function(host) {
+    if (!host.hostComponents) {
+      App.router.get('installerController').get('allHosts');
+    }
+    var componentNameDetail = [];
+    host.hostComponents.mapProperty('componentName').forEach(function (componentName) {
+      if (componentName === 'CLIENT') {
+        this.get('content.clients').mapProperty('component_name').forEach(function (clientComponent) {
+          componentNameDetail.push({ name : clientComponent });
+        }, this);
+        return;
+      }
+      componentNameDetail.push({ name : componentName });
+    }, this);
+    return componentNameDetail;
+  },
+
+  getPropertyAttributesForConfigType : function(configs) {
+    //Currently only looks for final properties, if any
+    var finalProperties = configs.filterProperty('isFinal', 'true');
+    var propertyAttributes = {};
+    finalProperties.forEach(function (finalProperty) {
+      propertyAttributes[finalProperty['name']] = "true";
+    });
+    var finalPropertyMap = {};
+    if (!App.isEmptyObject(finalProperties)) {
+      finalPropertyMap = {
+        "isFinal": propertyAttributes
+      };
+    }
+    return finalPropertyMap;
+  },
+
+  getConfigurationDetailsForConfigType: function(configs) {
+    var configDetails = {};
+    var self = this;
+    configs.forEach(function (propertyObj) {
+      configDetails[propertyObj['name']] = propertyObj['value'];
+    }, this);
+    var configurationsDetails = {
+      "properties_attributes": self.getPropertyAttributesForConfigType(configs),
+      "properties": configDetails
+    };
+    return configurationsDetails;
+  },
+
+  hostInExistingHostGroup: function (newHost, cluster_template_host_groups) {
+    var hostGroupMatched = false;
+    cluster_template_host_groups.some(function (existingHostGroup) {
+      if (!hostGroupMatched) {
+        var fqdnInHostGroup = existingHostGroup.hosts[0].fqdn;
+        var componentsInExistingHostGroup = this.getRegisteredHosts().filterProperty('hostName', fqdnInHostGroup)[0].hostComponents;
+        if (componentsInExistingHostGroup.length !== newHost.hostComponents.length) {
+          return;
+        } else {
+          var componentMismatch = false;
+          componentsInExistingHostGroup.forEach(function (componentInExistingHostGroup) {
+            if (!componentMismatch) {
+              if (!newHost.hostComponents.mapProperty('componentName').includes(componentInExistingHostGroup.componentName)) {
+                componentMismatch = true;
+              }
+            }
+          });
+          if (!componentMismatch) {
+            hostGroupMatched = true;
+            existingHostGroup["cardinality"]["cardinality"] = parseInt(existingHostGroup["cardinality"]["cardinality"], 10) + 1;
+            existingHostGroup.hosts.push({"fqdn": newHost.hostName});
+            return true;
+          }
+        }
+      }
+    }, this);
+    return hostGroupMatched;
+  },
+
+  generateBlueprint: function () {
+    console.log("Prepare blueprint for download...");
+    var blueprint = {};
+    var self = this;
+    //service configurations
+    var totalConf = [];
+    //Add cluster-env
+    var clusterEnv = this.get('configs').filterProperty('filename', 'cluster-env.xml');
+    var configurations = {};
+    configurations["cluster-env"] = self.getConfigurationDetailsForConfigType(clusterEnv);
+    totalConf.push(configurations);
+    //Add configurations for selected services
+    this.get('selectedServices').forEach(function (service) {
+      Object.keys(service.get('configTypes')).forEach(function (type) {
+        if (!this.get('serviceConfigTags').someProperty('type', type)) {
+          var configs = this.get('configs').filterProperty('filename', App.config.getOriginalFileName(type));
+          var configurations = {};
+          configurations[type] = self.getConfigurationDetailsForConfigType(configs);
+          totalConf.push(configurations);
+        }
+      }, this);
+    }, this);
+
+    //TODO address configGroups
+    var host_groups = [];
+    var cluster_template_host_groups = [];
+    var counter = 0;
+
+    this.getRegisteredHosts().filterProperty('isInstalled', false).map(function (host) {
+      var clusterTemplateHostGroupDetail = {};
+      if (self.hostInExistingHostGroup(host, cluster_template_host_groups)) {
+        return;
+      }
+
+      var hostGroupId = "host_group_" + counter;
+      var cardinality = {"cardinality": 1};
+      var hostGroupDetail = {
+        "name": hostGroupId,
+        "components": self.getComponentsForHost(host),
+        cardinality
+      };
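+      //serialize cardinality as a plain string in the blueprint JSON;
+      //the working copy stays an object so hostInExistingHostGroup can increment it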
+      hostGroupDetail.toJSON = function () {
+        var hostGroupDetailResult = {};
+        for (var x in this) {
+          if (x === "cardinality") {
+            hostGroupDetailResult[x] = (this[x]["cardinality"]).toString();
+          } else {
+            hostGroupDetailResult[x] = this[x];
+          }
+        }
+        return hostGroupDetailResult;
+      };
+      host_groups.push(hostGroupDetail);
+      var hostListForGroup = [];
+      var hostDetail = {
+        "fqdn": host.hostName
+      };
+      hostListForGroup.push(hostDetail);
+      clusterTemplateHostGroupDetail = {
+        "name": hostGroupId,
+        "hosts": hostListForGroup,
+        cardinality
+      };
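+      //cardinality is tracked only while grouping hosts; drop it from the cluster template JSON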
+      clusterTemplateHostGroupDetail.toJSON = function () {
+        return _.omit(this, [ "cardinality" ]);
+      };
+
+      cluster_template_host_groups.push(clusterTemplateHostGroupDetail);
+      counter++;
+    }, this);
+
+    var selectedStack = App.Stack.find().findProperty('isSelected', true);
+    blueprint = { //TODO: bp name
+        'configurations':totalConf,
+        'host_groups':host_groups,
+        'Blueprints':{'stack_name':selectedStack.get('stackName'), 'stack_version':selectedStack.get('stackVersion')}
+    };
+    fileUtils.downloadTextFile(JSON.stringify(blueprint), 'json', 'blueprint.json');
+
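+    //cluster creation template that references the generated host groups by name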
+    var cluster_template = {
+      "blueprint": App.clusterStatus.clusterName,
+      "config_recommendation_strategy" : "NEVER_APPLY",
+      "provision_action" : "INSTALL_AND_START",
+      "configurations":[],
+      "host_groups":cluster_template_host_groups
+    };
+    fileUtils.downloadTextFile(JSON.stringify(cluster_template), 'json', 'clustertemplate.json');
+  },
+
   downloadCSV: function() {
     App.router.get('kerberosWizardStep5Controller').getCSVData(false);
   }
-});
+});
\ No newline at end of file
diff --git a/ambari-web/app/controllers/wizard/step9_controller.js b/ambari-web/app/controllers/wizard/step9_controller.js
index 4697dad..9f27f65 100644
--- a/ambari-web/app/controllers/wizard/step9_controller.js
+++ b/ambari-web/app/controllers/wizard/step9_controller.js
@@ -424,7 +424,7 @@
         }
         break;
       case 'CUSTOM_COMMAND':
-        role = App.format.commandDetail(task.command_detail, task.request_input);
+        role = App.format.commandDetail(task.command_detail, task.request_input, task.ops_display_name);
       case 'EXECUTE' :
       case 'SERVICE_CHECK' :
         switch (task.status) {
diff --git a/ambari-web/app/mappers/stack_mapper.js b/ambari-web/app/mappers/stack_mapper.js
index 4b6a6f8..88ffc73 100644
--- a/ambari-web/app/mappers/stack_mapper.js
+++ b/ambari-web/app/mappers/stack_mapper.js
@@ -28,6 +28,7 @@
     stack_name: 'stack_name',
     stack_version: 'stack_version',
     stack_default: 'stack_default',
+    stack_repo_update_link_exists: 'stack_repo_update_link_exists',
     show_available: 'show_available',
     type: 'type',
     repository_version: 'repository_version',
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index a2edf06..0c15a19 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -125,6 +125,7 @@
   'common.more': 'More...',
   'common.print':'Print',
   'common.deploy':'Deploy',
+  'common.generate.blueprint':'Generate Blueprint',
   'common.message':'Message',
   'common.tasks':'Tasks',
   'common.open':'Open',
@@ -754,11 +755,7 @@
   'installer.step3.hostWarningsPopup.report.user': '<br><br>######################################<br># Users<br>#<br># A space delimited list of users who should not exist.<br># Provided so that administrators can easily copy paths into scripts, email etc.<br># Example: userdel hdfs<br>######################################<br>USERS<br>',
   'installer.step3.hostWarningsPopup.report.folder': '\\ /folder',
   'installer.step3.hostWarningsPopup.checks': 'Host Checks found',
-  'installer.step3.hostWarningsPopup.notice.beginning': 'After manually resolving the issues, click <b>Rerun Checks</b>.' +
-    '<br>To manually resolve issues on <b>each host</b> run the HostCleanup script (Python 2.6 or greater is required):<br>',
-  'installer.step3.hostWarningsPopup.notice.command': 'python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py --silent --skip=users',
-  'installer.step3.hostWarningsPopup.notice.end': '<div class="alert alert-warning"><b>Note</b>: Clean up of Firewall and Transparent Huge Page issues are not supported by the HostCleanup script.</div>' +
-    '<div class="alert alert-warning"><b>Note</b>: To clean up in interactive mode, remove <b>--silent</b> option. To clean up all resources, including <i>users</i>, remove <b>--skip=users</b> option. Use <b>--help</b> for a list of available options.</div>',
+  'installer.step3.hostWarningsPopup.notice.beginning': 'After manually resolving the issues, click <b>Rerun Checks</b>.',
   'installer.step3.hostWarningsPopup.summary':'{0} on {1}',
   'installer.step3.hostWarningsPopup.jdk':'JDK Issues',
   'installer.step3.hostWarningsPopup.jdk.name':'JDK not found at <i>{0}</i>',
@@ -1892,6 +1889,7 @@
   'admin.stackUpgrade.dialog.details.hide': "hide details",
   'admin.stackUpgrade.dialog.notActive': "Waiting to execute the next task...",
   'admin.stackUpgrade.dialog.prepareUpgrade.header': "Preparing the Upgrade...",
+  'admin.stackUpgrade.dialog.skipped.failures':'Some failed steps were automatically skipped. Please resolve each failure before continuing with the upgrade.',
   'services.service.start':'Start',
   'services.service.stop':'Stop',
   'services.service.metrics':'Metrics',
@@ -2547,6 +2545,7 @@
   'alerts.actions.manage_alert_notifications_popup.confirmDeleteBody':'Are you sure you want to delete {0} notification?',
   'alerts.actions.manage_alert_notifications_popup.error.name.empty': 'Notification name is required',
   'alerts.actions.manage_alert_notifications_popup.error.name.existed': 'Notification name already exists',
+  'alerts.actions.manage_alert_notifications_popup.scriptDispatchProperty':'Script Dispatch Property',
 
   'hosts.host.add':'Add New Hosts',
   'hosts.table.noHosts':'No hosts to display',
@@ -3163,7 +3162,7 @@
   'menu.item.dashboard':'Dashboard',
   'menu.item.services':'Services',
   'menu.item.hosts':'Hosts',
-  'menu.item.admin':'Admin',
+  'menu.item.admin':'Cluster Admin',
   'menu.item.alerts': 'Alerts',
   'menu.item.views':'<i class="glyphicon glyphicon-th"></i>',
   'menu.item.views.noViews':'No Views',
diff --git a/ambari-web/app/mixins/common/configs/configs_saver.js b/ambari-web/app/mixins/common/configs/configs_saver.js
index 7d8721d..4a4163e 100644
--- a/ambari-web/app/mixins/common/configs/configs_saver.js
+++ b/ambari-web/app/mixins/common/configs/configs_saver.js
@@ -368,10 +368,10 @@
   /*********************************** 3. GENERATING JSON TO SAVE *****************************/
 
   /**
-   * Map that contains last used timestamp per filename.
+   * Map of timestamp-based tags that have already been used.
    * There is a case when two config groups can update same filename almost simultaneously
-   * so they have equal timestamp only and this causes collision. So to prevent this we need to check
-   * if specific filename with specific timestamp is not saved yet
+   * so they would get equal timestamps and this would cause a collision. To prevent this we check
+   * that a tag with the given timestamp has not been used yet.
    *
    * @type {Object}
    */
@@ -389,14 +389,9 @@
     var desired_config = [];
     if (Em.isArray(configsToSave) && Em.isArray(fileNamesToSave) && fileNamesToSave.length && configsToSave.length) {
       serviceConfigNote = serviceConfigNote || "";
-      var tagVersion = "version" + (new Date).getTime();
-      fileNamesToSave.forEach(function(fName) {
 
-        /** @see <code>_timeStamps<code> **/
-        if (this.get('_timeStamps')[fName] === tagVersion) {
-          tagVersion = "version" + ((new Date).getTime() + 1);
-        }
-        this.get('_timeStamps')[fName] = tagVersion;
+      fileNamesToSave.forEach(function(fName) {
+        var tagVersion = this.getUniqueTag();
 
         if (this.allowSaveSite(fName)) {
           var properties = configsToSave.filterProperty('filename', fName);
@@ -409,6 +404,23 @@
   },
 
   /**
+   * Generate a unique timestamp-based tag for a config version
+   * @returns {string}
+   */
+  getUniqueTag: function() {
+    var timestamp = (new Date).getTime();
+    var tagVersion = "version" + timestamp;
+
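+    //bump the timestamp until the resulting tag has not been used yet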
+    while(this.get('_timeStamps')[tagVersion]) {
+      timestamp++;
+      tagVersion = "version" + timestamp;
+    }
+    /** @see <code>_timeStamps</code> */
+    this.get('_timeStamps')[tagVersion] = true;
+    return tagVersion;
+  },
+
+  /**
    * For some file names we have a restriction
    * and can't save them, in this case method will return false
    *
diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js
index 3e2a09a..7c4929f 100644
--- a/ambari-web/app/mixins/wizard/assign_master_components.js
+++ b/ambari-web/app/mixins/wizard/assign_master_components.js
@@ -250,10 +250,22 @@
   selectedServicesMasters: [],
 
   /**
+   * Is hosts data loaded
+   * @type {bool}
+   */
+  isHostsLoaded: false,
+
+  /**
+   * Are recommendations loaded
+   * @type {bool}
+   */
+  isRecommendationsLoaded: false,
+
+  /**
    * Is data for current step loaded
    * @type {bool}
    */
-  isLoaded: false,
+  isLoaded: Em.computed.and('isHostsLoaded', 'isRecommendationsLoaded'),
 
   /**
    * Is back from the next step
@@ -341,7 +353,7 @@
     }, this);
 
     return mapping.sortProperty('host_name');
-  }.property("selectedServicesMasters.@each.selectedHost", 'selectedServicesMasters.@each.isHostNameValid'),
+  }.property('selectedServicesMasters.@each.selectedHost', 'selectedServicesMasters.@each.isHostNameValid', 'isLoaded'),
 
   /**
    * Count of hosts without masters
@@ -484,7 +496,8 @@
   clearStep: function () {
     this.setProperties({
       hosts: [],
-      isLoaded: false,
+      isHostsLoaded: false,
+      isRecommendationsLoaded: false,
       backFromNextStep: false,
       selectedServicesMasters: [],
       servicesMasters: []
@@ -494,6 +507,10 @@
     }, this);
   },
 
+  clearStepOnExit: function () {
+    this.clearStep();
+  },
+
   /**
    * Load controller data (hosts, host components etc)
    * @method loadStep
@@ -511,7 +528,7 @@
       this.set('backFromNextStep',true);
     }
     this.getRecommendedHosts({
-      hosts: this.get('hosts').mapProperty('host_name')
+      hosts: this.getHosts()
     }).then(function() {
       self.loadStepCallback(self.createComponentInstallationObjects(), self);
     });
@@ -527,7 +544,7 @@
     self.get('addableComponents').forEach(function (componentName) {
       self.updateComponent(componentName);
     }, self);
-    self.set('isLoaded', true);
+    self.set('isRecommendationsLoaded', true);
     if (self.thereIsNoMasters() && !self.get('mastersToCreate').length) {
       App.router.send('next');
     }
@@ -579,25 +596,40 @@
    * @method renderHostInfo
    */
   renderHostInfo: function () {
-    var hostInfo = this.get('content.hosts');
-    var result = [];
+    var isInstaller = (this.get('wizardController.name') === 'installerController' || this.get('content.controllerName') === 'installerController');
+    App.ajax.send({
+      name: isInstaller ? 'hosts.info.install' : 'hosts.high_availability.wizard',
+      sender: this,
+      data: {
+        hostNames: isInstaller ? this.getHosts().join() : null
+      },
+      success: 'loadWizardHostsSuccessCallback'
+    });
+  },
 
-    for (var index in hostInfo) {
-      var _host = hostInfo[index];
+  loadWizardHostsSuccessCallback: function (data) {
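+    //merge the freshly requested host details with the boot info stored in the wizard content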
+    var hostInfo = this.get('content.hosts'),
+      result = [];
+    data.items.forEach(function (host) {
+      var hostName = host.Hosts.host_name,
+        _host = hostInfo[hostName],
+        cpu = host.Hosts.cpu_count,
+        memory = host.Hosts.total_mem.toFixed(2);
       if (_host.bootStatus === 'REGISTERED') {
         result.push(Em.Object.create({
-          host_name: _host.name,
-          cpu: _host.cpu,
-          memory: _host.memory,
-          disk_info: _host.disk_info,
-          maintenance_state: _host.maintenance_state,
+          host_name: hostName,
+          cpu: cpu,
+          memory: memory,
+          disk_info: host.Hosts.disk_info,
+          maintenance_state: host.Hosts.maintenance_state,
           isInstalled: _host.isInstalled,
-          host_info: Em.I18n.t('installer.step5.hostInfo').fmt(_host.name, numberUtils.bytesToSize(_host.memory, 1, 'parseFloat', 1024), _host.cpu)
+          host_info: Em.I18n.t('installer.step5.hostInfo').fmt(hostName, numberUtils.bytesToSize(memory, 1, 'parseFloat', 1024), cpu)
         }));
       }
-    }
-    this.set("hosts", result);
+    }, this);
+    this.set('hosts', result);
     this.sortHosts(this.get('hosts'));
+    this.set('isHostsLoaded', true);
   },
 
   /**
@@ -1101,7 +1133,8 @@
   },
 
   recommendAndValidate: function(callback) {
-    var self = this;
+    var self = this,
+      hostNames = this.getHosts();
 
     if (this.get('validationInProgress')) {
       this.set('runQueuedValidation', true);
@@ -1112,11 +1145,11 @@
 
     // load recommendations with partial request
     this.getRecommendedHosts({
-      hosts: this.get('hosts').mapProperty('host_name'),
+      hosts: hostNames,
       components: this.getCurrentComponentHostMap()
     }).then(function() {
       self.validateSelectedHostComponents({
-        hosts: self.get('hosts').mapProperty('host_name'),
+        hosts: hostNames,
         blueprint: self.get('recommendations')
       }).then(function() {
         if (callback) {
@@ -1224,5 +1257,9 @@
         self.set('submitButtonClicked', false);
       }
     });
+  },
+
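+  /**
+   * Host names are the keys of the wizard's content.hosts map
+   * @returns {string[]}
+   */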
+  getHosts: function () {
+    return Em.keys(this.get('content.hosts'));
   }
 });
diff --git a/ambari-web/app/mixins/wizard/wizardHostsLoading.js b/ambari-web/app/mixins/wizard/wizardHostsLoading.js
index 93dab02..950b51e 100644
--- a/ambari-web/app/mixins/wizard/wizardHostsLoading.js
+++ b/ambari-web/app/mixins/wizard/wizardHostsLoading.js
@@ -55,12 +55,8 @@
     data.items.forEach(function (item) {
       hosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
         bootStatus: "REGISTERED",
-        isInstalled: true,
-        maintenance_state: item.Hosts.maintenance_state
+        isInstalled: true
       };
     });
     App.db.setHosts(hosts);
diff --git a/ambari-web/app/models/stack.js b/ambari-web/app/models/stack.js
index dbc2d72..47d1c44 100644
--- a/ambari-web/app/models/stack.js
+++ b/ambari-web/app/models/stack.js
@@ -23,6 +23,7 @@
   stackName: DS.attr('string'),
   stackVersion: DS.attr('string'),
   stackDefault: DS.attr('boolean'),
+  stackRepoUpdateLinkExists: DS.attr('boolean'),
   minJdkVersion: DS.attr('string'),
   maxJdkVersion: DS.attr('string'),
   repositoryVersion: DS.attr('string'),
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index 89a4a36..1615f0d 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -167,6 +167,7 @@
       controller.dataLoading().done(function () {
         controller.loadAllPriorSteps().done(function () {
           App.logger.logTimerIfMoreThan(consoleMsg.format(2));
+          wizardStep2Controller.set('wizardController', controller);
           controller.connectOutlet('wizardStep5', controller.get('content'));
         });
       });
diff --git a/ambari-web/app/routes/installer.js b/ambari-web/app/routes/installer.js
index daefa48..eae03a7 100644
--- a/ambari-web/app/routes/installer.js
+++ b/ambari-web/app/routes/installer.js
@@ -315,6 +315,7 @@
       });
       controller.setCurrentStep('5');
       controller.loadAllPriorSteps().done(function () {
+        wizardStep5Controller.set('wizardController', controller);
         controller.connectOutlet('wizardStep5', controller.get('content'));
         self.scrollTop();
         console.timeEnd('step5 connectOutlets');
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 1f881e0..30cc8aa 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -653,7 +653,7 @@
 
       route: '/serviceAutoStart',
       enter: function(router, transition) {
-        if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.MANAGE_AUTO_START')) {
+        if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.MANAGE_AUTO_START') && !App.isAuthorized('SERVICE.MANAGE_AUTO_START')) {
           router.transitionTo('main.dashboard.index');
         }
       },
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index 5134daf..e4b21a7 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -104,14 +104,14 @@
   .col0,
   td:first-child,
   th:first-child {
-    width: 35%;
+    width: 15%;
 
   }
 
   .col1,
   td:first-child + td,
   th:first-child + th {
-    width: 15%;
+    width: 35%;
   }
 
   .col2,
@@ -388,6 +388,21 @@
       padding-right: 5px;
       min-width: 20px;
     }
+    .row {
+      margin-left: 0;
+      margin-right: 0;
+    }
+  }
+}
+
+.service-alerts-popup {
+  .modal {
+    .modal-content {
+      .modal-body {
+        padding-left: 0;
+        padding-right: 0;
+      }
+    }
   }
 }
 
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 23c534b..95990b8 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1983,9 +1983,15 @@
     width: 940px;
     padding: 0 15px;
   }
+
   .contribview .main-container {
     width: auto;
   }
+
+  .contribview .navbar .main-container {
+    width: 940px;
+    margin: 0 auto;
+  }
 }
 
 .filter-combobox{
@@ -2665,4 +2671,13 @@
   &.overlay-visible {
     display: block;
   }
-}
\ No newline at end of file
+}
+
+.step-marker {
+  .step-index {
+    display: block;
+    margin-top: -1px;
+    margin-left: 0.3px;
+  }
+}
+
diff --git a/ambari-web/app/styles/bootstrap_overrides.less b/ambari-web/app/styles/bootstrap_overrides.less
index a856d82..aabf6f2 100644
--- a/ambari-web/app/styles/bootstrap_overrides.less
+++ b/ambari-web/app/styles/bootstrap_overrides.less
@@ -355,6 +355,11 @@
     width: auto;
   }
 
+  .contribview .navbar .main-container {
+    width: 1170px;
+    margin: 0 auto;
+  }
+
   .thumbnails {
     margin-left: -30px;
   }
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index b2f5ca4..77bb086 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -212,6 +212,9 @@
   background-color: #429929;
   border: 1px solid #3FAE2A;
 }
+.btn-success {
+  border: none;
+}
 .btn-regular-default-state {
   background-color: #FFF;
   color: #666;
@@ -222,41 +225,52 @@
   border: 1px solid #3FAE2A;
   color: #FFF;
 }
-.btn-group.open .btn.dropdown-toggle {
+.btn-group.open .btn.dropdown-toggle,
+.dropdown.open .btn.dropdown-toggle {
   box-shadow: inset 0px 0px 3px 0px #1391c1;
 }
 .btn-group.open .btn.dropdown-toggle,
-.btn-group.open .btn.dropdown-toggle.btn-default {
+.dropdown.open .btn.dropdown-toggle,
+.btn-group.open .btn.dropdown-toggle.btn-default,
+.dropdown.open .btn.dropdown-toggle.btn-default {
   background-color: #FFF;
   color: #666;
   border: 1px solid #cfd3d7;
 }
 .btn-group.open .btn.dropdown-toggle:hover,
-.btn-group.open .btn.dropdown-toggle.btn-default:hover {
+.dropdown.open .btn.dropdown-toggle:hover,
+.btn-group.open .btn.dropdown-toggle.btn-default:hover,
+.dropdown.open .btn.dropdown-toggle.btn-default:hover {
   background-color: #FFF;
   color: #666;
   border: 1px solid #cfd3d7;
 }
 .btn-group.open .btn.dropdown-toggle + .dropdown-menu > li > a:hover,
-.btn-group.open .btn.dropdown-toggle.btn-default + .dropdown-menu > li > a:hover {
+.dropdown.open .btn.dropdown-toggle + .dropdown-menu > li > a:hover,
+.btn-group.open .btn.dropdown-toggle.btn-default + .dropdown-menu > li > a:hover,
+.dropdown.open .btn.dropdown-toggle.btn-default + .dropdown-menu > li > a:hover {
   background-color: #808793;
   color: #FFF;
 }
-.btn-group.open .btn.dropdown-toggle.btn-primary {
+.btn-group.open .btn.dropdown-toggle.btn-primary,
+.dropdown.open .btn.dropdown-toggle.btn-primary {
   background-color: #3FAE2A;
   border: 1px solid #3FAE2A;
   color: #FFF;
 }
-.btn-group.open .btn.dropdown-toggle.btn-primary:hover {
+.btn-group.open .btn.dropdown-toggle.btn-primary:hover,
+.dropdown.open .btn.dropdown-toggle.btn-primary:hover {
   background-color: #3FAE2A;
   border: 1px solid #3FAE2A;
   color: #FFF;
 }
-.btn-group.open .btn.dropdown-toggle.btn-primary + .dropdown-menu > li > a:hover {
+.btn-group.open .btn.dropdown-toggle.btn-primary + .dropdown-menu > li > a:hover,
+.dropdown.open .btn.dropdown-toggle.btn-primary + .dropdown-menu > li > a:hover {
   background-color: #429929;
   color: #FFF;
 }
-.btn-group.open .dropdown-menu {
+.btn-group.open .dropdown-menu,
+.dropdown.open .dropdown-menu {
   font-family: 'Roboto', sans-serif;
   font-weight: normal;
   font-style: normal;
@@ -269,17 +283,22 @@
   color: #666;
   border: 1px solid #cfd3d7;
 }
-.btn-group.open .dropdown-menu > li {
+.btn-group.open .dropdown-menu > li,
+.dropdown.open .dropdown-menu > li {
   margin-bottom: 1px;
 }
-.btn-group.open .dropdown-menu > li > a {
+.btn-group.open .dropdown-menu > li > a,
+.dropdown.open .dropdown-menu > li > a {
   height: 24px;
 }
-.btn-group .btn.dropdown-toggle:first-child {
+.btn-group .btn.dropdown-toggle:first-child,
+.dropdown .btn.dropdown-toggle:first-child {
   min-width: 80px;
 }
 .btn-group .btn.dropdown-toggle.disabled,
-.btn-group .btn.dropdown-toggle[disabled] {
+.dropdown .btn.dropdown-toggle.disabled,
+.btn-group .btn.dropdown-toggle[disabled],
+.dropdown .btn.dropdown-toggle[disabled] {
   opacity: 0.6;
 }
 input.form-control {
@@ -347,9 +366,9 @@
 .form-control[disabled],
 .form-control[readonly],
 fieldset[disabled] .form-control {
-  color: #dbdee2;
-  border-color: #b2b8c1;
-  background-color: #b2b8c1;
+  color: #999999;
+  border-color: #cccccc;
+  background-color: #dddddd;
 }
 h2.table-title {
   font-family: 'Roboto', sans-serif;
@@ -370,6 +389,8 @@
 }
 .table input[type="checkbox"] + label {
   position: relative;
+  line-height: 1.3em;
+  font-size: initial;
   top: 4px;
   margin-bottom: 0;
 }
@@ -703,7 +724,7 @@
   content: '';
   position: absolute;
   left: 0;
-  top: 2px;
+  top: 4px;
   width: 10px;
   height: 10px;
   box-sizing: border-box;
@@ -716,7 +737,7 @@
   content: '\2714';
   color: #FFF;
   position: absolute;
-  top: 2px;
+  top: 0;
   left: 2px;
   font-size: 9px;
 }
@@ -1352,16 +1373,16 @@
 .accordion .panel-group .panel,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel {
   border-radius: 0px;
-  border: 1px solid;
-  border-color: #ccc transparent;
-  border-bottom: none;
+  border: none;
   margin-top: 0px;
 }
 .accordion .panel-group .panel .panel-heading,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading {
   height: 50px;
   padding: 15px 10px;
-  border: 1px solid transparent;
+  border: 1px solid;
+  border-color: #ddd transparent;
+  border-top: none;
   background: #fff;
 }
 .accordion .panel-group .panel .panel-heading .panel-title,
@@ -1371,29 +1392,25 @@
   font-style: normal;
   line-height: 1;
   color: #333;
-  color: #1491c1;
 }
 .accordion .panel-group .panel .panel-heading .panel-title > a,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading .panel-title > a {
   font-size: 18px;
+  color: #333;
 }
 .accordion .panel-group .panel .panel-heading .panel-title > i,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading .panel-title > i {
   font-size: 20px;
+  color: #1491c1;
 }
 .accordion .panel-group .panel .panel-heading:hover,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading:hover {
   background: #f3faff;
-  border: 1px solid #a7dff2;
   cursor: pointer;
 }
 .accordion .panel-group .panel .panel-body,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-body {
-  padding: 30px 20px;
-}
-.accordion .panel-group:last-child .panel,
-.wizard .wizard-body .wizard-content .accordion .panel-group:last-child .panel {
-  border-bottom: 1px solid #ccc;
+  padding: 15px 10px 20px 20px;
 }
 h1,
 h2,
@@ -1464,4 +1481,4 @@
 a:visited.disabled:hover,
 a:focus.disabled:hover {
   text-decoration: none;
-}
\ No newline at end of file
+}
diff --git a/ambari-web/app/styles/top-nav.less b/ambari-web/app/styles/top-nav.less
index 4ba11e5..2fd7db0 100644
--- a/ambari-web/app/styles/top-nav.less
+++ b/ambari-web/app/styles/top-nav.less
@@ -66,7 +66,7 @@
         border-radius: 50%;
         min-width: 20px;
         height: 20px;
-        padding: 0px 0px;
+        padding: 0 4px;
         color: @top-nav-ops-count-color;
         background-color: @top-nav-ops-count-bg-color;
         text-align: center;
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index 45d9e28..130896dd 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -177,6 +177,9 @@
         float:left;
         white-space: nowrap;
       }
+      #display-action {
+        visibility:visible;
+      }
     }
     #warningsSection {
       margin: 0px 10px;
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
index f6be5d1..a5525be 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
@@ -39,12 +39,14 @@
     {{#if view.isLoaded}}
       <div>
         <div class="row">
-          {{#if isDowngrade}}
-            <button
-              class="btn btn-default" {{action confirmPauseDowngrade target="view"}}>{{t admin.stackUpgrade.pauseDowngrade}}</button>
-          {{else}}
-            <button
-              class="btn btn-default" {{action confirmPauseUpgrade target="view"}}>{{t admin.stackUpgrade.pauseUpgrade}}</button>
+          {{#if showPauseButton}}
+            {{#if isDowngrade}}
+              <button
+                class="btn btn-default" {{action confirmPauseDowngrade target="view"}}>{{t admin.stackUpgrade.pauseDowngrade}}</button>
+            {{else}}
+              <button
+                class="btn btn-default" {{action confirmPauseUpgrade target="view"}}>{{t admin.stackUpgrade.pauseUpgrade}}</button>
+            {{/if}}
           {{/if}}
         </div>
         {{#if view.runningItem}}
@@ -101,9 +103,13 @@
         {{#if view.plainManualItem}}
           <div class="panel panel-default details-box">
             <p class="manual-steps-title"><strong>{{t admin.stackUpgrade.dialog.manual}}</strong></p>
-            {{#each message in view.manualItem.messages}}
-              <p class="manual-steps-content">{{message}}</p>
-            {{/each}}
+            {{#if view.manualItem.messages.length}}
+              {{#each message in view.manualItem.messages}}
+                <p class="manual-steps-content">{{message}}</p>
+              {{/each}}
+            {{else}}
+              <p class="manual-steps-content">{{t admin.stackUpgrade.dialog.skipped.failures}}</p>
+            {{/if}}
             <label class="message">
               {{view App.CheckboxView checkedBinding="view.isManualDone" labelTranslate="admin.stackUpgrade.dialog.manualDone"}}
             </label>
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
index 13dca9a..0772215 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_version_box.hbs
@@ -32,6 +32,9 @@
   {{#if view.isPatch}}
     <i class="glyphicon glyphicon-umbrella"></i>&nbsp;{{t common.patch}}
   {{/if}}
+  {{#if view.isService}}
+    <i class="glyphicon glyphicon-umbrella"></i>&nbsp;{{t common.service}}
+  {{/if}}
 </p>
 
 
diff --git a/ambari-web/app/templates/main/alerts.hbs b/ambari-web/app/templates/main/alerts.hbs
index a8cc513..40469d3 100644
--- a/ambari-web/app/templates/main/alerts.hbs
+++ b/ambari-web/app/templates/main/alerts.hbs
@@ -33,15 +33,15 @@
   <table class="table advanced-header-table table-hover alerts-table" id="alert-definitions-table">
     <thead>
     {{#view view.sortView classNames="label-row" contentBinding="view.filteredContent"}}
-      {{view view.parentView.nameSort class="first name-sorting"}}
-      {{view view.parentView.statusSort class="status-sorting"}}
+      {{view view.parentView.statusSort class="first status-sorting"}}
+      {{view view.parentView.nameSort class="name-sorting"}}
       {{view view.parentView.serviceSort class="service-sorting"}}
       {{view view.parentView.lastTriggeredSort class="last-triggred-sorting"}}
       {{view view.parentView.enabledSort class="enabled-sorting"}}
     {{/view}}
     <tr class="filter-row">
-      <th class="first">{{view view.nameFilterView class="name-filter"}}</th>
-      <th>{{view view.stateFilterView class="status-filter"}}</th>
+      <th class="first">{{view view.stateFilterView class="status-filter"}}</th>
+      <th>{{view view.nameFilterView class="name-filter"}}</th>
       <th>{{view view.serviceFilterView class="service-filter"}}</th>
       <th>{{view view.triggeredFilterView class="last-triggered-filter"}}</th>
       <th>{{view view.enabledFilterView class="enabled-filter"}}</th>
@@ -52,13 +52,13 @@
       {{#if view.pageContent}}
         {{#each alertDefinition in view.pageContent}}
           <tr>
-            <td class="first alert-name">
+            <td class="first alert-status">
+              {{view App.AlertDefinitionSummary contentBinding="alertDefinition"}}
+            </td>
+            <td class="alert-name">
               <span {{bindAttr title="alertDefinition.type"}} {{bindAttr class=":type-icon  alertDefinition.typeIconClass"}}></span>
               <a href="#" {{action "gotoAlertDetails" alertDefinition}}>{{alertDefinition.label}}</a>
             </td>
-            <td class="alert-status">
-              {{view App.AlertDefinitionSummary contentBinding="alertDefinition"}}
-            </td>
             <td class="alert-service">{{alertDefinition.serviceDisplayName}}</td>
             <td class="alert-time">
               <time class="timeago" {{bindAttr data-original-title="alertDefinition.lastTriggeredFormatted"}}>{{alertDefinition.lastTriggeredAgoFormatted}}</time>
diff --git a/ambari-web/app/templates/main/alerts/create_alert_notification.hbs b/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
index 5b40bca..7ec5b1e 100644
--- a/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
+++ b/ambari-web/app/templates/main/alerts/create_alert_notification.hbs
@@ -315,6 +315,18 @@
     {{/if}}
     {{! alert-notification Custom SNMP end }}
 
+    {{! alert-notification Alert Script }}
+    {{#if view.isAlertScriptMethodSelected}}
+    <div class="form-group">
+      <label class="control-label col-md-2">{{controller.inputFields.scriptDispatchProperty.label}}</label>
+
+      <div class="col-md-10">
+         {{view Em.TextField valueBinding="controller.inputFields.scriptDispatchProperty.value" class="form-control"}}
+      </div>
+    </div>
+    {{/if}}
+    {{! alert-notification Alert Script end}}
+
     {{! alert-notification custom properties }}
     {{#each customProperty in controller.inputFields.customProperties}}
       <div class="form-group">
diff --git a/ambari-web/app/templates/wizard/step1.hbs b/ambari-web/app/templates/wizard/step1.hbs
index 769aca3..5faf056 100644
--- a/ambari-web/app/templates/wizard/step1.hbs
+++ b/ambari-web/app/templates/wizard/step1.hbs
@@ -81,8 +81,10 @@
               {{! Public Repository radio }}
               <div {{bindAttr class=":col-sm-4 :radio :big-radio :public-radio :wizard-plain-text"}}>
                 {{view view.usePublicRepoRadioButton classNames="display-inline-block" labelIdentifier="use-public-repo"}}
-                {{#if networkIssuesExist}}
-                  <a id="public-disabled-link" class="display-inline-block" {{action "openPublicOptionDisabledWindow" target="view"}}>{{t installer.step1.selectUseRepoOptions.public.networkLost}}</a>
+                {{#if stackRepoUpdateLinkExists}}
+                  {{#if networkIssuesExist}}
+                    <a id="public-disabled-link" class="display-inline-block" {{action "openPublicOptionDisabledWindow" target="view"}}>{{t installer.step1.selectUseRepoOptions.public.networkLost}}</a>
+                  {{/if}}
                 {{/if}}
               </div>
               {{!--Local repo radio--}}
diff --git a/ambari-web/app/templates/wizard/step10.hbs b/ambari-web/app/templates/wizard/step10.hbs
index 3c1012e..fef02b4 100644
--- a/ambari-web/app/templates/wizard/step10.hbs
+++ b/ambari-web/app/templates/wizard/step10.hbs
@@ -56,7 +56,7 @@
 
 <div class="wizard-footer col-md-12">
   <div class="btn-area">
-    <button type="button" class="btn btn-success pull-right" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action complete}}>
+    <button type="button" {{QAAttr "wizard-next"}} class="btn btn-success pull-right" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action complete}}>
       {{#if App.router.nextBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
       {{/if}}
diff --git a/ambari-web/app/templates/wizard/step3.hbs b/ambari-web/app/templates/wizard/step3.hbs
index 2a96379..e00012c 100644
--- a/ambari-web/app/templates/wizard/step3.hbs
+++ b/ambari-web/app/templates/wizard/step3.hbs
@@ -96,7 +96,7 @@
                       </a>
                     </td>
                     <td class="step3-table-action" {{QAAttr "confirm-hosts-table-body-cell"}}>
-                      <a class="action" href="#" {{action remove target="view"}} {{bindAttr disabled="isBackDisabled"}} {{QAAttr "remove-host-button"}}>
+                      <a class="action" id="display-action" href="#" {{action remove target="view"}} {{bindAttr disabled="isBackDisabled"}} {{QAAttr "remove-host-button"}}>
                         <i class="glyphicon glyphicon-trash" {{translateAttr title="common.remove"}}></i>
                       </a>
                     </td>
diff --git a/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs b/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
index 07ff31b..e234254 100644
--- a/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
+++ b/ambari-web/app/templates/wizard/step3/step3_host_warnings_popup.hbs
@@ -20,9 +20,7 @@
     <div id="host-warnings">
       <div class="notice">
         <span>{{t installer.step3.hostWarningsPopup.checks}} <b>{{view.warningsNotice}}</b>.<br>
-          {{t installer.step3.hostWarningsPopup.notice.beginning}}
-          <div class="code-snippet" {{QAAttr "host-cleanup-script"}}>{{t installer.step3.hostWarningsPopup.notice.command}}</div>
-          {{t installer.step3.hostWarningsPopup.notice.end}}</span>
+          {{t installer.step3.hostWarningsPopup.notice.beginning}}</span>
       </div>
       <div class="row">
         <form class="form-horizontal">
diff --git a/ambari-web/app/templates/wizard/step4.hbs b/ambari-web/app/templates/wizard/step4.hbs
index c688981..5a08250 100644
--- a/ambari-web/app/templates/wizard/step4.hbs
+++ b/ambari-web/app/templates/wizard/step4.hbs
@@ -34,17 +34,17 @@
         </tr>
         </thead>
         <tbody>
-        {{#each controller}}
+        {{#each service in controller}}
           {{#unless isHiddenOnSelectServicePage}}
-            <tr {{QAAttr "service-row"}} {{bindAttr class="isSelected:active isSelected:service-selected"}}>
-              <td {{QAAttr "service-name"}}>{{displayNameOnSelectServicePage}}</td>
-              <td {{QAAttr "service-version"}}>{{serviceVersionDisplay}}</td>
-              <td {{QAAttr "service-description"}}>{{{comments}}}</td>
+            <tr {{QAAttr "service-row"}} {{bindAttr class="service.isSelected:active service.isSelected:service-selected"}} {{action toggleCheckBox service target="view"}}>
+              <td {{QAAttr "service-name"}}>{{service.displayNameOnSelectServicePage}}</td>
+              <td {{QAAttr "service-version"}}>{{service.serviceVersionDisplay}}</td>
+              <td {{QAAttr "service-description"}}>{{{service.comments}}}</td>
               <td>
                 <div class="checkbox">
-                  {{view App.CheckboxView checkboxClassNamesBinding="serviceName" data-qa="toggle-service"
-                  disabledBinding="isDisabled"
-                  checkedBinding="isSelected"
+                  {{view App.CheckboxView checkboxClassNamesBinding="service.serviceName" data-qa="toggle-service"
+                  disabledBinding="service.isDisabled"
+                  checkedBinding="service.isSelected"
                   }}
                 </div>
               </td>
@@ -61,14 +61,14 @@
 <div class="wizard-footer col-md-12">
   <div class="btn-area">
     {{#unless view.parentView.controller.hideBackButton}}
-      <button type="button" class="btn btn-default pull-left installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
+      <button type="button" {{QAAttr "wizard-back"}} class="btn btn-default pull-left installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
         &larr; {{t common.back}}
         {{#if App.router.backBtnClickInProgress}}
           {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
         {{/if}}
       </button>
     {{/unless}}
-    <button type="button" class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>
+    <button type="button" {{QAAttr "wizard-next"}} class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>
       {{#if App.router.nextBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
       {{/if}}
diff --git a/ambari-web/app/templates/wizard/step6.hbs b/ambari-web/app/templates/wizard/step6.hbs
index c06177e..6dfb5f1 100644
--- a/ambari-web/app/templates/wizard/step6.hbs
+++ b/ambari-web/app/templates/wizard/step6.hbs
@@ -95,13 +95,13 @@
 
 <div class="wizard-footer col-md-12">
   <div class="btn-area">
-    <button type="button" class="btn btn-default installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
+    <button type="button" {{QAAttr "wizard-back"}} class="btn btn-default installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
       &larr; {{t common.back}}
       {{#if App.router.backBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
       {{/if}}
     </button>
-    <button type="button" class="btn btn-success pull-right" {{bindAttr disabled="submitDisabled"}} {{action next}}>
+    <button type="button" {{QAAttr "wizard-next"}} class="btn btn-success pull-right" {{bindAttr disabled="submitDisabled"}} {{action next}}>
       {{#if App.router.nextBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
       {{/if}}
diff --git a/ambari-web/app/templates/wizard/step7.hbs b/ambari-web/app/templates/wizard/step7.hbs
index aecaf82..206c3cb 100644
--- a/ambari-web/app/templates/wizard/step7.hbs
+++ b/ambari-web/app/templates/wizard/step7.hbs
@@ -33,7 +33,7 @@
 
 <div class="wizard-footer col-md-12">
   <div class="btn-area">
-    <button type="button" class="btn btn-default installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
+    <button type="button" {{QAAttr "wizard-back"}} class="btn btn-default installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
       &larr; {{t common.back}}
       {{#if App.router.backBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
@@ -43,7 +43,7 @@
       {{#if App.supports.preInstallChecks}}
         <a class="btn btn-default" {{action runPreInstallChecks target="App.router.preInstallChecksController"}} {{bindAttr disabled="isSubmitDisabled"}}><i class="glyphicon glyphicon-exclamation"></i> {{t installer.step7.preInstallChecks}}</a>
       {{/if}}
-      <button type="button" class="btn btn-success" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>
+      <button type="button" class="btn btn-success" {{QAAttr "wizard-next"}} {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>
         {{#if App.router.nextBtnClickInProgress}}
           {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
         {{/if}}
diff --git a/ambari-web/app/templates/wizard/step8.hbs b/ambari-web/app/templates/wizard/step8.hbs
index f0e7b83..907b3d4 100644
--- a/ambari-web/app/templates/wizard/step8.hbs
+++ b/ambari-web/app/templates/wizard/step8.hbs
@@ -84,13 +84,13 @@
 
 <div class="wizard-footer col-md-12">
   <div class="btn-area">
-    <button type="button" class="btn btn-default pull-left installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
+    <button type="button" {{QAAttr "wizard-back"}} class="btn btn-default pull-left installer-back-btn" {{bindAttr disabled="App.router.btnClickInProgress"}} {{action back}}>
       &larr; {{t common.back}}
       {{#if App.router.backBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
       {{/if}}
     </button>
-    <button type="button" class="btn btn-success pull-right"
+    <button type="button" {{QAAttr "wizard-next"}} class="btn btn-success pull-right"
        id="spinner" {{bindAttr disabled="controller.isSubmitDisabled"}} {{action submit target="controller"}}>
       {{#if App.router.nextBtnClickInProgress}}
         {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
@@ -99,5 +99,10 @@
     </button>
     <button type="button" class="btn btn-info pull-right" {{action printReview target="view"}}>{{t common.print}}</button>
     <button type="button" {{bindAttr class=":btn :btn-primary :pull-right :mrm controller.showDownloadCsv::hide"}} {{action downloadCSV target="controller"}}>{{t admin.kerberos.wizard.step5.downloadCSV}}</button>
+    {{#unless App.router.clusterInstallCompleted}}
+      <button type="button" class="btn btn-success pull-right" {{action generateBlueprint target="controller"}}><i class="glyphicon glyphicon-download-alt"></i>&nbsp;
+        {{t common.generate.blueprint}}
+      </button>
+    {{/unless}}
   </div>
 </div>
diff --git a/ambari-web/app/templates/wizard/step9.hbs b/ambari-web/app/templates/wizard/step9.hbs
index 3b7d17f..5d63798 100644
--- a/ambari-web/app/templates/wizard/step9.hbs
+++ b/ambari-web/app/templates/wizard/step9.hbs
@@ -127,7 +127,7 @@
 </div>
 <div class="wizard-footer col-md-12">
     <div class="btn-area">
-      <button class="btn btn-success pull-right" {{bindAttr disabled="isNextButtonDisabled"}} {{action submit target="controller"}} {{QAAttr "wizard-next"}}>
+      <button class="btn btn-success pull-right" {{QAAttr "wizard-next"}} {{bindAttr disabled="isNextButtonDisabled"}} {{action submit target="controller"}} {{QAAttr "wizard-next"}}>
         {{#if App.router.nextBtnClickInProgress}}
           {{view App.SpinnerView tagName="span" classNames="service-button-spinner"}}
         {{/if}}
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 2d157fe..f7d0914 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -231,7 +231,7 @@
   },
 
   'common.request.polling': {
-    'real': '/clusters/{clusterName}/requests/{requestId}?fields=tasks/Tasks/request_id,tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/start_time,tasks/Tasks/end_time,tasks/Tasks/exit_code,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status,tasks/Tasks/structured_out,Requests/*&tasks/Tasks/stage_id={stageId}',
+    'real': '/clusters/{clusterName}/requests/{requestId}?fields=tasks/Tasks/request_id,tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/ops_display_name,tasks/Tasks/start_time,tasks/Tasks/end_time,tasks/Tasks/exit_code,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status,tasks/Tasks/structured_out,Requests/*&tasks/Tasks/stage_id={stageId}',
     'mock': '/data/background_operations/host_upgrade_tasks.json'
   },
 
@@ -539,12 +539,14 @@
     'mock': '/data/alerts/alert_instances_history.json'
   },
   'background_operations.get_most_recent': {
-    'real': '/clusters/{clusterName}/requests?to=end&page_size={operationsCount}&fields=Requests',
+    'real': '/clusters/{clusterName}/requests?to=end&page_size={operationsCount}&fields=' +
+    'Requests/end_time,Requests/id,Requests/progress_percent,Requests/request_context,' +
+    'Requests/request_status,Requests/start_time,Requests/cluster_name&minimal_response=true',
     'mock': '/data/background_operations/list_on_start.json',
     'testInProduction': true
   },
   'background_operations.get_by_request': {
-    'real': '/clusters/{clusterName}/requests/{requestId}?fields=*,tasks/Tasks/request_id,tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status&minimal_response=true',
+    'real': '/clusters/{clusterName}/requests/{requestId}?fields=*,tasks/Tasks/request_id,tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/ops_display_name,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status&minimal_response=true',
     'mock': '/data/background_operations/task_by_request{requestId}.json',
     'testInProduction': true
   },
@@ -1684,7 +1686,7 @@
     'mock': '/data/stack_versions/upgrade_item.json'
   },
   'admin.upgrade.service_checks': {
-    'real': '/clusters/{clusterName}/upgrades/{upgradeId}/upgrade_groups?upgrade_items/UpgradeItem/status=COMPLETED&upgrade_items/tasks/Tasks/status.in(FAILED,ABORTED,TIMEDOUT)&upgrade_items/tasks/Tasks/command=SERVICE_CHECK&fields=upgrade_items/tasks/Tasks/command_detail,upgrade_items/tasks/Tasks/status&minimal_response=true'
+    'real': '/clusters/{clusterName}/upgrades/{upgradeId}/upgrade_groups?upgrade_items/UpgradeItem/status=COMPLETED&upgrade_items/tasks/Tasks/status.in(FAILED,ABORTED,TIMEDOUT)&upgrade_items/tasks/Tasks/command=SERVICE_CHECK&fields=upgrade_items/tasks/Tasks/command_detail,upgrade_items/tasks/Tasks/ops_display_name,upgrade_items/tasks/Tasks/status&minimal_response=true'
   },
   'admin.upgrade.update.options': {
     'real': '/clusters/{clusterName}/upgrades/{upgradeId}',
@@ -1707,6 +1709,7 @@
     'type': 'POST',
     'format': function (data) {
       return {
+        timeout : 600000,
         data: JSON.stringify({
           "Upgrade": {
             "repository_version": data.value,
@@ -1968,7 +1971,7 @@
     'mock': '/data/wizard/deploy/5_hosts/get_host_state.json'
   },
   'wizard.step9.load_log': {
-    'real': '/clusters/{cluster}/requests/{requestId}?fields=tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/exit_code,tasks/Tasks/start_time,tasks/Tasks/end_time,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status&minimal_response=true',
+    'real': '/clusters/{cluster}/requests/{requestId}?fields=tasks/Tasks/command,tasks/Tasks/command_detail,tasks/Tasks/ops_display_name,tasks/Tasks/exit_code,tasks/Tasks/start_time,tasks/Tasks/end_time,tasks/Tasks/host_name,tasks/Tasks/id,tasks/Tasks/role,tasks/Tasks/status&minimal_response=true',
     'mock': '/data/wizard/deploy/5_hosts/poll_{numPolls}.json',
     'format': function () {
       return {
@@ -2240,7 +2243,7 @@
   },
 
   'wizard.stacks_versions_definitions': {
-    'real': '/version_definitions?fields=VersionDefinition/stack_default,VersionDefinition/max_jdk,VersionDefinition/min_jdk,operating_systems/repositories/Repositories/*,operating_systems/OperatingSystems/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
+    'real': '/version_definitions?fields=VersionDefinition/stack_default,VersionDefinition/stack_repo_update_link_exists,VersionDefinition/max_jdk,VersionDefinition/min_jdk,operating_systems/repositories/Repositories/*,operating_systems/OperatingSystems/*,VersionDefinition/stack_services,VersionDefinition/repository_version' +
       '&VersionDefinition/show_available=true&VersionDefinition/stack_name={stackName}',
     'mock': '/data/wizard/stack/{stackName}_version_definitions.json'
   },
@@ -2695,6 +2698,10 @@
     'real': '/clusters/{clusterName}/hosts?fields=Hosts/cpu_count,Hosts/disk_info,Hosts/total_mem,Hosts/ip,Hosts/os_type,Hosts/os_arch,Hosts/public_host_name,host_components&minimal_response=true',
     'mock': ''
   },
+  'hosts.info.install': {
+    'real': '/hosts?Hosts/host_name.in({hostNames})&fields=Hosts/cpu_count,Hosts/disk_info,Hosts/total_mem,Hosts/ip,Hosts/os_type,Hosts/os_arch,Hosts/public_host_name&minimal_response=true',
+    'mock': ''
+  },
   'hosts.host_components.pre_load': {
     real: '',
     mock: '/data/hosts/HDP2/hosts.json',
diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js
index 03a2e82..4867c65 100644
--- a/ambari-web/app/utils/helper.js
+++ b/ambari-web/app/utils/helper.js
@@ -694,10 +694,14 @@
    * @param {string} request_inputs
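+   * @param {string} [ops_display_name] optional display name defined in the service metainfo.xml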
    * @return {string}
    */
-  commandDetail: function (command_detail, request_inputs) {
+  commandDetail: function (command_detail, request_inputs, ops_display_name) {
     var detailArr = command_detail.split(' ');
     var self = this;
     var result = '';
+    //if an optional operation display name has been specified in the service metainfo.xml
+    if (ops_display_name != null && ops_display_name.length > 0) {
+      result = result + ' ' + ops_display_name;
+    } else {
     detailArr.forEach( function(item) {
       // if the item has the pattern SERVICE/COMPONENT, drop the SERVICE part
       if (item.contains('/')) {
@@ -715,6 +719,7 @@
         result = result + ' ' + self.role(item, false);
       }
     });
+    }
 
     if (result.indexOf('Decommission:') > -1 || result.indexOf('Recommission:') > -1) {
       // for Decommission command, make sure the hostname is in lower case
diff --git a/ambari-web/app/utils/host_progress_popup.js b/ambari-web/app/utils/host_progress_popup.js
index c615cae..5bd02cd 100644
--- a/ambari-web/app/utils/host_progress_popup.js
+++ b/ambari-web/app/utils/host_progress_popup.js
@@ -548,7 +548,7 @@
       id: _task.Tasks.id,
       hostName: _task.Tasks.host_name,
       command: _task.Tasks.command.toLowerCase() === 'service_check' ? '' : _task.Tasks.command.toLowerCase(),
-      commandDetail: App.format.commandDetail(_task.Tasks.command_detail, _task.Tasks.request_inputs),
+      commandDetail: App.format.commandDetail(_task.Tasks.command_detail, _task.Tasks.request_inputs, _task.Tasks.ops_display_name),
       status: App.format.taskStatus(_task.Tasks.status),
       role: App.format.role(_task.Tasks.role, false),
       stderr: _task.Tasks.stderr,
diff --git a/ambari-web/app/views/common/assign_master_components_view.js b/ambari-web/app/views/common/assign_master_components_view.js
index 892bcae..001667a 100644
--- a/ambari-web/app/views/common/assign_master_components_view.js
+++ b/ambari-web/app/views/common/assign_master_components_view.js
@@ -49,6 +49,10 @@
 
   didInsertElement: function () {
     this.get('controller').loadStep();
+  },
+
+  willDestroyElement: function () {
+    this.get('controller').clearStepOnExit();
   }
 });
 
diff --git a/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js b/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
index c7e810b..5c8ea78 100644
--- a/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
+++ b/ambari-web/app/views/common/configs/widgets/slider_config_widget_view.js
@@ -113,7 +113,8 @@
     var configValue = this.get('config.value');
     var defaultGroupAttr = this.get('config.stackConfigProperty.valueAttributes');
     var groupAttr = this.get('configGroup') && defaultGroupAttr[this.get('configGroup.name')];
-    var boundary = (groupAttr && !Em.isNone(groupAttr[attribute])) ? groupAttr[attribute] : defaultGroupAttr[attribute];
+    var usedGroupAttr = (groupAttr && !Em.isNone(groupAttr[attribute])) ? groupAttr : defaultGroupAttr;
+    var boundary = usedGroupAttr[attribute];
 
     if (!this.get('referToSelectedGroup')) {
       if (attribute === 'minimum') {
@@ -126,6 +127,14 @@
         }
       }
     }
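+    //the stack defines no numeric boundary for this attribute: fall back to the
+    //config value, clamped by the opposite bound when that bound is numeric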
+    if (isNaN(boundary) && !isNaN(configValue)) {
+      if (attribute === 'minimum') {
+        return isNaN(usedGroupAttr['maximum']) ? configValue : Math.min(usedGroupAttr['maximum'], configValue).toString();
+      }
+      if (attribute === 'maximum') {
+        return isNaN(usedGroupAttr['minimum']) ? configValue : Math.max(usedGroupAttr['minimum'], configValue).toString();
+      }
+    }
     return boundary;
   },
   /**
@@ -251,12 +260,16 @@
     }
   },
 
+  mirrorValueObs: function () {
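+    //Em.run.once coalesces repeated observer firings into a single mirrorValueObsOnce call per run loop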
+    Em.run.once(this, 'mirrorValueObsOnce');
+  },
+
   /**
    * Check if <code>mirrorValue</code> was updated by user
    * Validate it. If value is correct, set it to slider and config.value
    * @method mirrorValueObs
    */
-  mirrorValueObs: function () {
+  mirrorValueObsOnce: function () {
     var mirrorValue = this.get('mirrorValue'),
       slider = this.get('slider'),
       min = this.get('minMirrorValue'),
diff --git a/ambari-web/app/views/common/host_progress_popup_body_view.js b/ambari-web/app/views/common/host_progress_popup_body_view.js
index 5cce8e7..057f8bc 100644
--- a/ambari-web/app/views/common/host_progress_popup_body_view.js
+++ b/ambari-web/app/views/common/host_progress_popup_body_view.js
@@ -532,7 +532,7 @@
    * @param {string} levelName
    * @method switchLevel
    */
-  switchLevel: function (levelName) {
+  switchLevel: function (levelName, isBackToLevel = false) {
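+    //when isBackToLevel is true the user navigated back up a level, so keep the previously selected category filter instead of resetting it to "all"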
     var dataSourceController = this.get('controller.dataSourceController');
     var args = [].slice.call(arguments);
     this.get('hostComponentLogs').clear();
@@ -543,7 +543,9 @@
       levelInfo.set('name', levelName);
       if (levelName === 'HOSTS_LIST') {
         this.set('isLevelLoaded', dataSourceController.requestMostRecent());
-        this.set('hostCategory', this.get('categories').findProperty('value', 'all'));
+        if (!isBackToLevel) {
+          this.set('hostCategory', this.get('categories').findProperty('value', 'all'));
+        }
       }
       else {
         if (levelName === 'TASK_DETAILS') {
@@ -552,12 +554,16 @@
         }
         else {
           if (levelName === 'REQUESTS_LIST') {
-            this.set('serviceCategory', this.get('categories').findProperty('value', 'all'));
+            if (!isBackToLevel) {
+              this.set('serviceCategory', this.get('categories').findProperty('value', 'all'));
+            }
             this.get('controller.hosts').clear();
             dataSourceController.requestMostRecent();
           }
           else {
-            this.set('taskCategory', this.get('categories').findProperty('value', 'all'));
+            if (!isBackToLevel) {
+              this.set('taskCategory', this.get('categories').findProperty('value', 'all'));
+            }
           }
         }
       }
@@ -630,7 +636,7 @@
     this.set("openedTaskId", 0);
     this.set("parentView.isLogWrapHidden", true);
     this.set("parentView.isTaskListHidden", false);
-    this.switchLevel("TASKS_LIST");
+    this.switchLevel("TASKS_LIST", true);
   },
 
   /**
@@ -643,7 +649,7 @@
     this.set("parentView.isTaskListHidden", true);
     this.get("controller").set("popupHeaderName", this.get("controller.serviceName"));
     this.get("controller").set("operationInfo", this.get('controller.servicesInfo').findProperty('name', this.get('controller.serviceName')));
-    this.switchLevel("HOSTS_LIST");
+    this.switchLevel("HOSTS_LIST", true);
   },
 
   /**
@@ -659,7 +665,7 @@
     this.set("parentView.isLogWrapHidden", true);
     this.set("hosts", null);
     this.get("controller").setBackgroundOperationHeader(false);
-    this.switchLevel("REQUESTS_LIST");
+    this.switchLevel("REQUESTS_LIST", true);
   },
 
   /**
diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index dfaf953..47958f4 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -187,7 +187,7 @@
     if (!Em.isNone(quickLinksConfig)) {
       var protocolConfig = Em.get(quickLinksConfig, 'protocol');
       var checks = Em.get(protocolConfig, 'checks');
-      var sites = ['core-site', 'hdfs-site'];
+      var sites = ['core-site', 'hdfs-site', 'admin-properties'];
       if (checks) {
         checks.forEach(function (check) {
           var protocolConfigSiteProp = Em.get(check, 'site');
@@ -359,6 +359,11 @@
           host = hostObj.Hosts.public_host_name;
         }
       }
+    } else if (serviceName === 'RANGER') {
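+      // for Ranger, the quick link host is taken from policymgr_external_url in admin-properties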
+      var siteConfigs = this.get('configProperties').findProperty('type', 'admin-properties').properties;
+      if (siteConfigs['policymgr_external_url']) {
+        host = siteConfigs['policymgr_external_url'].split('://')[1].split(':')[0];
+      }
     }
 
     var linkPort = this.setPort(Em.get(link, 'port'), protocol, configProperties);
@@ -404,24 +409,20 @@
       var quickLinks = [];
       var configProperties = this.get('configProperties');
       var protocol = this.setProtocol(configProperties, quickLinksConfig.get('protocol'));
-      var publicHostName = hosts[0].publicHostName;
 
       var links = Em.get(quickLinksConfig, 'links');
       links.forEach(function (link) {
         var componentName = link.component_name;
         var hostNameForComponent = hosts.findProperty('componentName',componentName);
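+        // generate a quick link only when the component's host is known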
         if (hostNameForComponent) {
-            publicHostName = hostNameForComponent.publicHostName;
+          var publicHostName = hostNameForComponent.publicHostName;
           if (link.protocol) {
             protocol = this.setProtocol(configProperties, link.protocol);
           }
-        }
-        if (componentName && !hostNameForComponent) {
-          return;
-        }
-        var newItem = this.getHostLink(link, publicHostName, protocol, configProperties, response); //quicklink generated for the hbs template
-        if (!Em.isNone(newItem)) {
-          quickLinks.push(newItem);
+          var newItem = this.getHostLink(link, publicHostName, protocol, configProperties, response); //quicklink generated for the hbs template
+          if (!Em.isNone(newItem)) {
+            quickLinks.push(newItem);
+          }
         }
       }, this);
       this.set('quickLinks', quickLinks);
@@ -637,9 +638,6 @@
             }
           default:
             hosts = hosts.concat(componentHosts);
-            if(hosts.length < 1 && this.getWithDefault('content.hostComponents', []).someProperty('isMaster')) {
-              hosts = this.findHosts(this.get('content.hostComponents').findProperty('isMaster').get('componentName'), response);
-            }
             break;
         }
       }, this);
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 0fa84e8..509f380 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -46,7 +46,7 @@
         disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
       });
     }
-    if (App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
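+    // showing Service Auto Start now also requires auto-start management privileges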
+    if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
       if (App.supports.serviceAutoStart) {
         items.push({
           name: 'serviceAutoStart',
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index 081d7cd..f102402 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -73,6 +73,8 @@
 
   isPatch: Em.computed.equal('content.type', 'PATCH'),
 
+  isService: Em.computed.equal('content.type', 'SERVICE'),
+
   /**
    * @type {boolean}
    */
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 6e79aba..feb3baf 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -115,7 +115,7 @@
             href: router.urlFor('main.admin.adminKerberos')
           });
         }
-        if (App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') || upg) {
+        if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || upg) {
           if (App.supports.serviceAutoStart) {
             categories.push({
               name: 'serviceAutoStart',
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index ac736a6..e25ade1 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -239,7 +239,8 @@
         });
       }
 
-      self.addActionMap().filterProperty('service', serviceName).forEach(function(item) {
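+      // "Add component" actions require the HOST.ADD_DELETE_COMPONENTS privilege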
+      if (App.isAuthorized('HOST.ADD_DELETE_COMPONENTS')) {
+        self.addActionMap().filterProperty('service', serviceName).forEach(function(item) {
         if (App.get('components.addableToHost').contains(item.component)) {
 
           var isEnabled = App.HostComponent.find().filterProperty('componentName', item.component).length < App.get('allHostNames.length');
@@ -255,7 +256,8 @@
 
           options.push(item);
         }
-      });
+        });
+      }
 
       allMasters.forEach(function(master) {
         var component = App.StackServiceComponent.find(master);
diff --git a/ambari-web/app/views/main/service/reassign_view.js b/ambari-web/app/views/main/service/reassign_view.js
index 6885726..c7c1228 100644
--- a/ambari-web/app/views/main/service/reassign_view.js
+++ b/ambari-web/app/views/main/service/reassign_view.js
@@ -49,10 +49,6 @@
     data.items.forEach(function (item) {
       hosts[item.Hosts.host_name] = {
         name: item.Hosts.host_name,
-        cpu: item.Hosts.cpu_count,
-        memory: item.Hosts.total_mem,
-        disk_info: item.Hosts.disk_info,
-        maintenance_state: item.Hosts.maintenance_state,
         bootStatus: "REGISTERED",
         isInstalled: true
       };
diff --git a/ambari-web/app/views/wizard/step4_view.js b/ambari-web/app/views/wizard/step4_view.js
index 137b9f5..928d46c 100644
--- a/ambari-web/app/views/wizard/step4_view.js
+++ b/ambari-web/app/views/wizard/step4_view.js
@@ -21,6 +21,11 @@
 
 App.WizardStep4View = Em.View.extend({
 
-  templateName: require('templates/wizard/step4')
+  templateName: require('templates/wizard/step4'),
 
+  /**
+   * Toggle <code>isSelected</code> of the clicked service unless the checkbox is disabled
+   * @param {object} event
+   */
+  toggleCheckBox: function (event) {
+    if (event.context.get('isDisabled')) {
+      return;
+    }
+    event.context.toggleProperty('isSelected');
+  }
 });
diff --git a/ambari-web/app/views/wizard/step9/hostLogPopupBody_view.js b/ambari-web/app/views/wizard/step9/hostLogPopupBody_view.js
index d983252..b951f1c 100644
--- a/ambari-web/app/views/wizard/step9/hostLogPopupBody_view.js
+++ b/ambari-web/app/views/wizard/step9/hostLogPopupBody_view.js
@@ -136,7 +136,7 @@
         taskInfo.set('id', _task.Tasks.id);
         taskInfo.set('requestId', _task.Tasks.request_id);
         taskInfo.set('command', _task.Tasks.command.toLowerCase() === 'service_check' ? '' : _task.Tasks.command.toLowerCase());
-        taskInfo.set('commandDetail', App.format.commandDetail(_task.Tasks.command_detail, _task.Tasks.request_inputs));
+        taskInfo.set('commandDetail', App.format.commandDetail(_task.Tasks.command_detail, _task.Tasks.request_inputs, _task.Tasks.ops_display_name));
         taskInfo.set('status', App.format.taskStatus(_task.Tasks.status));
         taskInfo.set('role', App.format.role(_task.Tasks.role, false));
         taskInfo.set('stderr', _task.Tasks.stderr);
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index 1c6c0c6..d7a1d18 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -126,29 +126,34 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>${basedir}</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for 
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
-              <arguments>install</arguments>
+              <arguments>install --ignore-engines --pure-lockfile</arguments>
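+              <!-- "pure-lockfile" keeps yarn from rewriting yarn.lock during the build;
+                   "ignore-engines" skips engine checks against the pinned Node version -->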
             </configuration>
           </execution>
         </executions>
@@ -268,6 +273,7 @@
             <exclude>node_modules/**</exclude>
             <exclude>node/**</exclude>
             <exclude>npm-debug.log</exclude>
+            <exclude>yarn.lock</exclude>
           </excludes>
         </configuration>
         <executions>
diff --git a/ambari-web/test/controllers/global/background_operations_test.js b/ambari-web/test/controllers/global/background_operations_test.js
index 8d982bb..4a8235c 100644
--- a/ambari-web/test/controllers/global/background_operations_test.js
+++ b/ambari-web/test/controllers/global/background_operations_test.js
@@ -227,12 +227,7 @@
             Requests: {
               id: 1,
               request_context: '',
-              task_count: 1,
-              aborted_task_count: 0,
-              completed_task_count: 0,
-              failed_task_count: 0,
-              timed_out_task_count: 0,
-              queued_task_count: 0
+              request_status: 'IN_PROGRESS'
             }
           }
         ]
@@ -336,178 +331,6 @@
     });
   });
 
-  describe('#isRequestRunning()', function () {
-    var testCases = [
-      {
-        title: 'Counters are missing',
-        request: {
-          Requests: {}
-        },
-        result: false
-      },
-      {
-        title: 'Request has zero tasks',
-        request: {
-          Requests: {
-            task_count: 0,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 0,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      },
-      {
-        title: 'One task in running status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 0,
-            queued_task_count: 0
-          }
-        },
-        result: true
-      },
-      {
-        title: 'One task in queued status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 0,
-            queued_task_count: 1
-          }
-        },
-        result: true
-      },
-      {
-        title: 'One task in aborted status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 1,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 0,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      },
-      {
-        title: 'One task in completed status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 0,
-            completed_task_count: 1,
-            failed_task_count: 0,
-            timed_out_task_count: 0,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      },
-      {
-        title: 'One task in failed status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 1,
-            timed_out_task_count: 0,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      },
-      {
-        title: 'One task in timed out status',
-        request: {
-          Requests: {
-            task_count: 1,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 1,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      },
-      {
-        title: 'One task in timed out status and the second one in running',
-        request: {
-          Requests: {
-            task_count: 2,
-            aborted_task_count: 0,
-            completed_task_count: 0,
-            failed_task_count: 0,
-            timed_out_task_count: 1,
-            queued_task_count: 0
-          }
-        },
-        result: true
-      },
-      {
-        title: 'One task in each status',
-        request: {
-          Requests: {
-            task_count: 5,
-            aborted_task_count: 1,
-            completed_task_count: 1,
-            failed_task_count: 1,
-            timed_out_task_count: 1,
-            queued_task_count: 1
-          }
-        },
-        result: true
-      },
-      {
-        title: 'One task in each status except queued',
-        request: {
-          Requests: {
-            task_count: 5,
-            aborted_task_count: 1,
-            completed_task_count: 1,
-            failed_task_count: 1,
-            timed_out_task_count: 1,
-            queued_task_count: 0
-          }
-        },
-        result: true
-      },
-      {
-        title: 'No tasks in running status',
-        request: {
-          Requests: {
-            task_count: 4,
-            aborted_task_count: 1,
-            completed_task_count: 1,
-            failed_task_count: 1,
-            timed_out_task_count: 1,
-            queued_task_count: 0
-          }
-        },
-        result: false
-      }
-    ];
-
-    testCases.forEach(function (test) {
-      it(test.title, function () {
-        expect(controller.isRequestRunning(test.request)).to.eql(test.result);
-      });
-    });
-  });
-
   describe('#isOneHost()', function () {
     var testCases = [
       {
diff --git a/ambari-web/test/controllers/installer_test.js b/ambari-web/test/controllers/installer_test.js
index 94af88e..bc91a8e 100644
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@ -736,10 +736,6 @@
     it ('Should return hosts', function() {
       var hosts = {
         'h1': {
-          disk_info: Em.A([{
-            available: 1,
-            size: 10
-          }]),
           hostComponents: Em.A([])
         }
       };
@@ -765,14 +761,6 @@
       var res = JSON.parse(JSON.stringify(installerController.get('allHosts')));
       expect(res).to.eql([
         {
-          "diskInfo": [
-            {
-              "available": 1,
-              "size": 10
-            }
-          ],
-          "diskTotal": 0.0000095367431640625,
-          "diskFree": 9.5367431640625e-7,
           "hostComponents": [
             {
               "componentName": "component",
diff --git a/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js b/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
index 17f5ed2..9ae300c 100644
--- a/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/highAvailability/journalNode/step1_controller_test.js
@@ -199,9 +199,9 @@
       expect(controller.updateComponent.calledWith('C1')).to.be.true;
     });
 
-    it('isLoaded should be true', function() {
+    it('isRecommendationsLoaded should be true', function() {
       controller.loadStepCallback([], controller);
-      expect(controller.get('isLoaded')).to.be.true;
+      expect(controller.get('isRecommendationsLoaded')).to.be.true;
     });
   });
 
diff --git a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
index 31da561..0d58afa 100644
--- a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
+++ b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
@@ -336,6 +336,9 @@
         port: {
           value: ''
         },
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -401,6 +404,9 @@
           value: 'test1@test.test, test2@test.test'
         },
         port: {},
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
@@ -495,6 +501,9 @@
         port: {
           value: ''
         },
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -556,6 +565,9 @@
         port: {
           value: 161
         },
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
@@ -649,6 +661,9 @@
         port: {
           value: ''
         },
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
           {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
@@ -708,12 +723,160 @@
         port: {
           value: 161
         },
+        scriptDispatchProperty: {
+          value: ''
+        },
         customProperties: [
           {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
         ]
       }));
 
-    })
+    });
+
+    it("should map properties from selectedAlertNotification to inputFields - ALERT_SCRIPT", function () {
+
+      controller.set('selectedAlertNotification', Em.Object.create({
+        name: 'test_alert_script',
+        global: true,
+        description: 'test_description',
+        groups: ['test1', 'test2'],
+        type: 'ALERT_SCRIPT',
+        alertStates: ['OK', 'UNKNOWN'],
+        properties: {
+          'ambari.dispatch-property.script': 'com.mycompany.dispatch.syslog.script',
+          'customName': 'customValue'
+        }
+      }));
+
+      controller.set('inputFields', Em.Object.create({
+        name: {
+          value: ''
+        },
+        groups: {
+          value: []
+        },
+        global: {
+          value: false
+        },
+        allGroups: {
+          value: false
+        },
+        method: {
+          value: ''
+        },
+        email: {
+          value: ''
+        },
+        severityFilter: {
+          value: []
+        },
+        description: {
+          value: ''
+        },
+        SMTPServer: {
+          value: ''
+        },
+        SMTPPort: {
+          value: ''
+        },
+        SMTPUseAuthentication: {
+          value: ''
+        },
+        SMTPUsername: {
+          value: ''
+        },
+        SMTPPassword: {
+          value: ''
+        },
+        retypeSMTPPassword: {
+          value: ''
+        },
+        SMTPSTARTTLS: {
+          value: ''
+        },
+        emailFrom: {
+          value: ''
+        },
+        version: {
+          value: ''
+        },
+        OIDs: {
+          value: ''
+        },
+        community: {
+          value: ''
+        },
+        host: {
+          value: ''
+        },
+        port: {
+          value: ''
+        },
+        scriptDispatchProperty: {
+          value: ''
+        },
+        customProperties: [
+          {name: 'customName', value: 'customValue1', defaultValue: 'customValue1'},
+          {name: 'customName2', value: 'customValue1', defaultValue: 'customValue1'}
+        ]
+      }));
+
+      controller.fillEditCreateInputs();
+
+      expect(JSON.stringify(controller.get('inputFields'))).to.equal(JSON.stringify({
+        name: {
+          value: 'test_alert_script'
+        },
+        groups: {
+          value: ['test1', 'test2']
+        },
+        global: {
+          value: true,
+          disabled: true
+        },
+        allGroups: {
+          value: 'all'
+        },
+        method: {
+          value: 'Alert Script'
+        },
+        email: {
+          value: ''
+        },
+        severityFilter: {
+          value: ['OK', 'UNKNOWN']
+        },
+        description: {
+          value: 'test_description'
+        },
+        SMTPServer: {},
+        SMTPPort: {},
+        SMTPUseAuthentication: {
+          value: true
+        },
+        SMTPUsername: {},
+        SMTPPassword: {},
+        retypeSMTPPassword: {},
+        SMTPSTARTTLS: {
+          value: true
+        },
+        emailFrom: {},
+        version: {},
+        OIDs: {},
+        community: {},
+        host: {
+          value: ''
+        },
+        port: {},
+        scriptDispatchProperty: {
+          value: 'com.mycompany.dispatch.syslog.script'
+        },
+        customProperties: [
+          {name: 'customName', value: 'customValue', defaultValue: 'customValue'}
+        ]
+      }));
+
+    });
   });
 
   describe("#showCreateEditPopup()", function () {
diff --git a/ambari-web/test/controllers/main/service/add_controller_test.js b/ambari-web/test/controllers/main/service/add_controller_test.js
index a52c38f..1119176 100644
--- a/ambari-web/test/controllers/main/service/add_controller_test.js
+++ b/ambari-web/test/controllers/main/service/add_controller_test.js
@@ -167,25 +167,7 @@
   describe('#loadHostsSuccessCallback', function () {
 
     it('should load hosts to local db and model', function () {
-      var diskInfo = [
-          {
-            available: '600000',
-            used: '400000',
-            percent: '40%',
-            size: '10000000',
-            type: 'ext4',
-            mountpoint: '/'
-          },
-          {
-            available: '500000',
-            used: '300000',
-            percent: '50%',
-            size: '6000000',
-            type: 'ext4',
-            mountpoint: '/'
-          }
-        ],
-        hostComponents = [
+      var hostComponents = [
           [
             {
               HostRoles: {
@@ -219,31 +201,13 @@
           items: [
             {
               Hosts: {
-                cpu_count: 1,
-                disk_info: [
-                  diskInfo[0]
-                ],
-                host_name: 'h0',
+                host_name: 'h0'
-                ip: '10.1.1.0',
-                os_arch: 'x86_64',
-                os_type: 'centos6',
-                total_mem: 4194304,
-                maintenance_state: 'ON'
               },
               host_components: hostComponents[0]
             },
             {
               Hosts: {
-                cpu_count: 2,
-                disk_info: [
-                  diskInfo[1]
-                ],
-                host_name: 'h1',
-                ip: '10.1.1.1',
-                os_arch: 'x86',
-                os_type: 'centos5',
-                total_mem: 3145728,
-                maintenance_state: 'OFF'
+                host_name: 'h1'
               },
               host_components: hostComponents[1]
             }
@@ -252,29 +216,15 @@
         expected = {
           h0: {
             name: 'h0',
-            cpu: 1,
-            memory: 4194304,
-            disk_info: [diskInfo[0]],
-            osType: 'centos6',
-            osArch: 'x86_64',
-            ip: '10.1.1.0',
             bootStatus: 'REGISTERED',
             isInstalled: true,
-            maintenance_state: 'ON',
             hostComponents: hostComponents[0],
             id: 0
           },
           h1: {
             name: 'h1',
-            cpu: 2,
-            memory: 3145728,
-            disk_info: [diskInfo[1]],
-            osType: 'centos5',
-            osArch: 'x86',
-            ip: '10.1.1.1',
             bootStatus: 'REGISTERED',
             isInstalled: true,
-            maintenance_state: 'OFF',
             hostComponents: hostComponents[1],
             id: 1
           }
diff --git a/ambari-web/test/controllers/wizard/step5_test.js b/ambari-web/test/controllers/wizard/step5_test.js
index 3c9048d..57c33b2 100644
--- a/ambari-web/test/controllers/wizard/step5_test.js
+++ b/ambari-web/test/controllers/wizard/step5_test.js
@@ -94,56 +94,86 @@
 
   });
 
-  describe('#renderHostInfo', function () {
+  describe('#loadWizardHostsSuccessCallback', function () {
 
     var tests = Em.A([
       {
-        hosts: {
-          h1: {memory: 4, cpu: 1, name: 'host1', bootStatus: 'INIT'},
-          h2: {memory: 3, cpu: 1, name: 'host2', bootStatus: 'INIT'},
-          h3: {memory: 2, cpu: 1, name: 'host3', bootStatus: 'INIT'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'INIT'}
+        dbHosts: {
+          host1: {bootStatus: 'INIT'},
+          host2: {bootStatus: 'INIT'},
+          host3: {bootStatus: 'INIT'},
+          host4: {bootStatus: 'INIT'}
         },
+        hosts: [
+          {Hosts: {total_mem: 4, cpu_count: 1, host_name: 'host1', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 3, cpu_count: 1, host_name: 'host2', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host3', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'INIT'}}
+        ],
         m: 'no one host is REGISTERED',
         e: []
       },
       {
-        hosts: {
-          h1: {memory: 4, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 3, cpu: 1, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 2, cpu: 1, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 4, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 3, cpu_count: 1, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, memory',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 1, cpu: 4, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 3, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 1, cpu: 2, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 1, cpu_count: 4, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 3, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 2, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, cpu',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 1, name: 'host2', bootStatus: 'REGISTERED'},
-          h3: {memory: 1, cpu: 1, name: 'host3', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'REGISTERED'},
+          host3: {bootStatus: 'REGISTERED'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host2', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host3', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}}
+        ],
         m: 'all hosts are REGISTERED, host_name',
         e: ['host1', 'host2', 'host3', 'host4']
       },
       {
-        hosts: {
-          h1: {memory: 2, cpu: 1, name: 'host1', bootStatus: 'REGISTERED'},
-          h2: {memory: 1, cpu: 2, name: 'host3', bootStatus: 'INIT'},
-          h3: {memory: 1, cpu: 1, name: 'host4', bootStatus: 'REGISTERED'},
-          h4: {memory: 1, cpu: 1, name: 'host2', bootStatus: 'INIT'}
+        dbHosts: {
+          host1: {bootStatus: 'REGISTERED'},
+          host2: {bootStatus: 'INIT'},
+          host3: {bootStatus: 'INIT'},
+          host4: {bootStatus: 'REGISTERED'}
         },
+        hosts: [
+          {Hosts: {total_mem: 2, cpu_count: 1, host_name: 'host1', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 2, host_name: 'host3', bootStatus: 'INIT'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host4', bootStatus: 'REGISTERED'}},
+          {Hosts: {total_mem: 1, cpu_count: 1, host_name: 'host2', bootStatus: 'INIT'}}
+        ],
         m: 'mix',
         e: ['host1', 'host4']
       }
@@ -151,8 +181,8 @@
 
     tests.forEach(function (test) {
       it(test.m, function () {
-        controller.set('content', {hosts: test.hosts});
-        controller.renderHostInfo();
+        controller.set('content', {hosts: test.dbHosts});
+        controller.loadWizardHostsSuccessCallback({items: test.hosts});
         var r = controller.get('hosts');
         expect(Em.A(r).mapProperty('host_name')).to.eql(test.e);
       });
diff --git a/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js b/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
index e70055b..fd7637e 100644
--- a/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
+++ b/ambari-web/test/controllers/wizard/step7/assign_master_controller_test.js
Binary files differ
diff --git a/ambari-web/test/controllers/wizard/step8_test.js b/ambari-web/test/controllers/wizard/step8_test.js
index 7cdb69a..1a3214e 100644
--- a/ambari-web/test/controllers/wizard/step8_test.js
+++ b/ambari-web/test/controllers/wizard/step8_test.js
@@ -50,7 +50,73 @@
   Em.Object.create({filename: 'falcon-startup.properties.xml', name: 'p1', value: 'v1'}),
   Em.Object.create({filename: 'falcon-startup.properties.xml', name: 'p2', value: 'v2'}),
   Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p1', value: 'v1'}),
-  Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p2', value: 'v2'})
+  Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p2', value: 'v2'}),
+  Em.Object.create({filename: 'cluster-env.xml', name: 'p1', value: 'v1'})
+]);
+
+var services = Em.A([
+  Em.Object.create({
+    serviceName: 's1',
+    isSelected: true,
+    isInstalled: false,
+    displayNameOnSelectServicePage: 's01',
+    isClientOnlyService: false,
+    serviceComponents: Em.A([
+      Em.Object.create({
+        isClient: true
+      })
+    ]),
+    configTypes: Em.A([
+      Em.Object.create({
+        type: 'cluster-env'
+      })
+    ]),
+    isHiddenOnSelectServicePage: false
+  }),
+  Em.Object.create({
+    serviceName: 's2',
+    isSelected: true,
+    isInstalled: false,
+    displayNameOnSelectServicePage: 's02',
+    serviceComponents: Em.A([
+      Em.Object.create({
+        isMaster: true
+      })
+    ]),
+    configTypes: Em.A([
+      Em.Object.create({
+        type: 'hdfs-site'
+      })
+    ]),
+    isHiddenOnSelectServicePage: false
+  }),
+  Em.Object.create({
+    serviceName: 's3',
+    isSelected: true,
+    isInstalled: false,
+    displayNameOnSelectServicePage: 's03',
+    serviceComponents: Em.A([
+      Em.Object.create({
+        isHAComponentOnly: true
+      })
+    ]),
+    configTypes: [],
+    isHiddenOnSelectServicePage: false
+  }),
+  Em.Object.create({
+    serviceName: 's4',
+    isSelected: true,
+    isInstalled: false,
+    displayNameOnSelectServicePage: 's03',
+    isClientOnlyService: true,
+    serviceComponents: Em.A([
+      Em.Object.create({
+        isClient: true
+      })
+    ]),
+    configTypes: [],
+    isHiddenOnSelectServicePage: false
+  })
 ]);
 
 function getController() {
@@ -211,54 +277,6 @@
   describe('#loadServices', function () {
 
     beforeEach(function () {
-      var services = Em.A([
-        Em.Object.create({
-          serviceName: 's1',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's01',
-          isClientOnlyService: false,
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isClient: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's2',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's02',
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isMaster: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's3',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's03',
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isHAComponentOnly: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's4',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's03',
-          isClientOnlyService: true,
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isClient: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        })
-      ]);
       var selectedServices = services.filterProperty('isSelected');
       var slaveComponentHosts = Em.A([
         Em.Object.create({
@@ -2323,5 +2341,19 @@
     });
 
   });
-
+
+  describe('#generateBlueprint', function () {
+    beforeEach(function () {
+      installerStep8Controller = getController();
+      installerStep8Controller.set('configs', configs);
+      installerStep8Controller.set('content.services', services.filterProperty('isSelected'));
+      installerStep8Controller.set('selectedServices', []);
+      sinon.spy(installerStep8Controller, 'getConfigurationDetailsForConfigType');
+    });
+    it('should collect configuration details for each used config type', function () {
+      installerStep8Controller.generateBlueprint();
+      expect(installerStep8Controller.getConfigurationDetailsForConfigType.calledThrice).to.be.true;
+    });
+  });
 });
diff --git a/ambari-web/test/controllers/wizard_test.js b/ambari-web/test/controllers/wizard_test.js
index 078f8ae..1ce96ed 100644
--- a/ambari-web/test/controllers/wizard_test.js
+++ b/ambari-web/test/controllers/wizard_test.js
@@ -1257,8 +1257,8 @@
 
     it('should return all hosts', function () {
       var hosts = {
-        'h1': {hostComponents: ['c1', 'c2'], disk_info: [{size: 2, available: 1}]},
-        'h2': {hostComponents: ['c3', 'c4'], disk_info: [{size: 2, available: 1}]}
+        'h1': {hostComponents: ['c1', 'c2']},
+        'h2': {hostComponents: ['c3', 'c4']}
       };
 
       var content = Em.Object.create({
@@ -1271,19 +1271,6 @@
         {
           "id": "h1",
           "hostName": "h1",
-          "publicHostName": "h1",
-          "diskInfo": [
-            {
-              "size": 2,
-              "available": 1
-            }
-          ],
-          "diskTotal": 0.0000019073486328125,
-          "diskFree": 9.5367431640625e-7,
-          "disksMounted": 1,
-          "osType": 0,
-          "osArch": 0,
-          "ip": 0,
           "hostComponents": [
             {
               "componentName": "c1",
@@ -1298,19 +1285,6 @@
         {
           "id": "h2",
           "hostName": "h2",
-          "publicHostName": "h2",
-          "diskInfo": [
-            {
-              "size": 2,
-              "available": 1
-            }
-          ],
-          "diskTotal": 0.0000019073486328125,
-          "diskFree": 9.5367431640625e-7,
-          "disksMounted": 1,
-          "osType": 0,
-          "osArch": 0,
-          "ip": 0,
           "hostComponents": [
             {
               "componentName": "c3",
diff --git a/ambari-web/test/mixins/common/configs/configs_saver_test.js b/ambari-web/test/mixins/common/configs/configs_saver_test.js
index 6e65cf9..7815938 100644
--- a/ambari-web/test/mixins/common/configs/configs_saver_test.js
+++ b/ambari-web/test/mixins/common/configs/configs_saver_test.js
@@ -259,6 +259,19 @@
     })
   });
 
+  describe('#getUniqueTag', function() {
+
+    it('should generate unique tags', function() {
+      var tags = [];
+      for (var i = 0; i < 3; i++) {
+        tags.push(mixin.getUniqueTag());
+      }
+      expect(tags[1]).to.not.be.equal(tags[0]);
+      expect(tags[2]).to.not.be.equal(tags[1]);
+      expect(tags[0]).to.not.be.equal(tags[2]);
+    });
+  });
+
   describe('#getModifiedConfigs', function () {
     var configs = [
       Em.Object.create({
diff --git a/ambari-web/test/utils/helper_test.js b/ambari-web/test/utils/helper_test.js
index 4b9ec36..9dcbc4f 100644
--- a/ambari-web/test/utils/helper_test.js
+++ b/ambari-web/test/utils/helper_test.js
@@ -256,6 +256,8 @@
     describe('#App.format', function(){
       describe('#commandDetail()', function() {
         var command = "GANGLIA_MONITOR STOP";
+        var customCommandDetail = "Remove_Logical_Mycomponent Mycomponent";
+        var opsDisplayName = "Remove Logical Mycomponent";
         var ignored = "DECOMMISSION, NAMENODE";
         var removeString = "SERVICE/HDFS STOP";
         var nagiosState = "nagios_update_ignore ACTIONEXECUTE";
@@ -263,6 +265,9 @@
         it('should convert command to readable info', function() {
           expect(App.format.commandDetail(command)).to.be.equal(' Ganglia Monitor Stop');
         });
+        it('should use display name for operations if specified', function() {
+          expect(App.format.commandDetail(customCommandDetail, null, opsDisplayName)).to.be.equal(' Remove Logical Mycomponent');
+        });
         it('should ignore decommission command', function(){
           expect(App.format.commandDetail(ignored)).to.be.equal('  NameNode');
         });
diff --git a/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js b/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
index 5cc055c..3be5b24 100644
--- a/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
+++ b/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
@@ -137,7 +137,15 @@
     });
   });
 
-  describe('#mirrorValueObs', function () {
+  describe('#mirrorValueObsOnce', function () {
+
+    beforeEach(function () {
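+      // stub Em.run.once to call through immediately so mirrorValueObsOnce runs synchronously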
+      sinon.stub(Em.run, 'once', Em.tryInvoke);
+    });
+
+    afterEach(function () {
+      Em.run.once.restore();
+    });
 
     describe('check int', function () {
 
@@ -307,6 +315,19 @@
       viewInt.set('config.group', {name: 'group1'});
       expect(viewInt.getValueAttributeByGroup('maximum')).to.equal('3072');
     });
+
+    it('minimum is missing', function () {
+      viewInt.set('config.stackConfigProperty.valueAttributes.minimum', undefined);
+      expect(viewInt.getValueAttributeByGroup('minimum')).to.equal('486');
+    });
+
+    it('minimum is missing, value is invalid', function () {
+      viewInt.get('config').setProperties({
+        'value': 3072,
+        'stackConfigProperty.valueAttributes.minimum': undefined
+      });
+      expect(viewInt.getValueAttributeByGroup('minimum')).to.equal('2096');
+    });
   });
 
   describe('#initSlider', function() {
diff --git a/ambari-web/test/views/common/quick_link_view_test.js b/ambari-web/test/views/common/quick_link_view_test.js
index fbdb711..48cdb28 100644
--- a/ambari-web/test/views/common/quick_link_view_test.js
+++ b/ambari-web/test/views/common/quick_link_view_test.js
@@ -205,7 +205,7 @@
       quickViewLinks.set('content.serviceName', 'YARN');
       mock.returns(quickLinksConfigYARN);
       quickViewLinks.loadQuickLinksConfigSuccessCallback({items: []});
-      expect(quickViewLinks.get('requiredSiteNames')).to.be.eql(["core-site", "hdfs-site", "hbase-site", "yarn-site"]);
+      expect(quickViewLinks.get('requiredSiteNames')).to.be.eql(["core-site", "hdfs-site", "admin-properties", "hbase-site", "yarn-site"]);
     });
   });
 
diff --git a/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js b/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
index d30be42..9f3b6a6 100644
--- a/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/highAvailability/resourceManager/wizard_view_test.js
@@ -85,11 +85,7 @@
       items: [
         {
           Hosts: {
-            host_name: 'host1',
-            cpu_count: 1,
-            total_mem: 1,
-            disk_info: {},
-            maintenance_state: 'OFF'
+            host_name: 'host1'
           }
         }
       ]
@@ -108,23 +104,15 @@
       expect(view.get('controller.content.hosts')).to.be.eql({
         "host1": {
           "name": "host1",
-          "cpu": 1,
-          "memory": 1,
-          "disk_info": {},
           "bootStatus": "REGISTERED",
-          "isInstalled": true,
-          "maintenance_state": "OFF"
+          "isInstalled": true
         }
       });
       expect(App.db.setHosts.calledWith({
         "host1": {
           "name": "host1",
-          "cpu": 1,
-          "memory": 1,
-          "disk_info": {},
           "bootStatus": "REGISTERED",
-          "isInstalled": true,
-          "maintenance_state": "OFF"
+          "isInstalled": true
         }
       })).to.be.true;
     });
diff --git a/ambari-web/test/views/main/service/reassign_view_test.js b/ambari-web/test/views/main/service/reassign_view_test.js
index f6ae81e..4de4aad 100644
--- a/ambari-web/test/views/main/service/reassign_view_test.js
+++ b/ambari-web/test/views/main/service/reassign_view_test.js
@@ -69,10 +69,6 @@
         {
           Hosts: {
-            host_name: 'host1',
+            host_name: 'host1'
-            cpu_count: 1,
-            total_mem: 1024,
-            disk_info: {},
-            maintenance_state: 'ON'
           }
         }
       ]
@@ -90,11 +86,7 @@
         {
           "host1": {
             "bootStatus": "REGISTERED",
-            "cpu": 1,
-            "disk_info": {},
             "isInstalled": true,
-            "maintenance_state": "ON",
-            "memory": 1024,
             "name": "host1"
           }
         }
@@ -104,11 +96,7 @@
       expect(view.get('controller.content.hosts')).to.be.eql({
         "host1": {
           "bootStatus": "REGISTERED",
-          "cpu": 1,
-          "disk_info": {},
           "isInstalled": true,
-          "maintenance_state": "ON",
-          "memory": 1024,
           "name": "host1"
         }
       });
diff --git a/ambari-web/yarn.lock b/ambari-web/yarn.lock
new file mode 100644
index 0000000..26a0211
--- /dev/null
+++ b/ambari-web/yarn.lock
@@ -0,0 +1,4153 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abab@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.3.tgz#b81de5f7274ec4e756d797cd834f303642724e5d"
+
+abbrev@1, abbrev@1.0.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn-globals@^3.1.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-3.1.0.tgz#fd8270f71fbb4996b004fa880ee5d46573a731bf"
+  dependencies:
+    acorn "^4.0.4"
+
+acorn@^4.0.3, acorn@^4.0.4:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+adm-zip@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/adm-zip/-/adm-zip-0.2.1.tgz#e801cedeb5bd9a4e98d699c5c0f4239e2731dcbf"
+
+after@0.8.2:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f"
+
+ajv@^4.9.1:
+  version "4.11.7"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.7.tgz#8655a5d86d0824985cc471a1d913fb6729a0ec48"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-color@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-color/-/ansi-color-0.2.1.tgz#3e75c037475217544ed763a8db5709fa9ae5bf9a"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+anymatch@^1.0.0, anymatch@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.0.0.tgz#0aed64d30bc25973afdb3155eb87ae6881e21b1c"
+  dependencies:
+    minimatch "~1.0.0"
+
+anymatch@^1.3.0, anymatch@~1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+anysort@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/anysort/-/anysort-1.0.1.tgz#341bd5d5ba1485f64e55ae865f1d45994b507fc4"
+  dependencies:
+    anymatch "~1.3.0"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.2.tgz#80e470e95a084794fe1899262c5667c6e88de1b3"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+argparse@^1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-slice@^0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-0.2.3.tgz#dd3cfb80ed7973a75117cdac69b0b99ec86186f5"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+assertion-error@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.0.2.tgz#13ca515d86206da0bac66e834dd397d87581094c"
+
+assetsmanager-brunch@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/assetsmanager-brunch/-/assetsmanager-brunch-1.8.1.tgz#e2fcff9c1b5fb5f5feb3fcbd0d762a08ad91072d"
+  dependencies:
+    fs-extra "~0.6.3"
+    glob "~3.2.3"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-each@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d"
+
+async-each@~0.1.2, async-each@~0.1.3, async-each@~0.1.5:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-0.1.6.tgz#b67e99edcddf96541e44af56290cd7d5c6e70439"
+
+async-waterfall@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/async-waterfall/-/async-waterfall-0.1.5.tgz#398bd48b0eac5d40ffbe400fe9e37a53ba966dae"
+
+async@1.x, async@^1.4.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-brunch@^5.1.2:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/babel-brunch/-/babel-brunch-5.1.2.tgz#4b476b9ea2b64dbfd8864ac3665932f634469bdc"
+  dependencies:
+    anymatch "^1.0.0"
+    babel-core "^5.0.0"
+
+babel-code-frame@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4"
+  dependencies:
+    chalk "^1.1.0"
+    esutils "^2.0.2"
+    js-tokens "^3.0.0"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-core@^6.0.0, babel-core@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.24.1.tgz#8c428564dce1e1f41fb337ec34f4c3b022b5ad83"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-generator "^6.24.1"
+    babel-helpers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-register "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    convert-source-map "^1.1.0"
+    debug "^2.1.1"
+    json5 "^0.5.0"
+    lodash "^4.2.0"
+    minimatch "^3.0.2"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+
+babel-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.24.1.tgz#e715f486c58ded25649d888944d52aa07c5d9497"
+  dependencies:
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    detect-indent "^4.0.0"
+    jsesc "^1.3.0"
+    lodash "^4.2.0"
+    source-map "^0.5.0"
+    trim-right "^1.0.1"
+
+babel-helper-call-delegate@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-define-map@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9"
+  dependencies:
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-get-function-arity@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-hoist-variables@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-optimise-call-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-replace-supers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a"
+  dependencies:
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helpers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-messages@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-check-es2015-constants@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-transform-es2015-arrow-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoping@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-plugin-transform-es2015-classes@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db"
+  dependencies:
+    babel-helper-define-map "^6.24.1"
+    babel-helper-function-name "^6.24.1"
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-helper-replace-supers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-computed-properties@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-destructuring@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-duplicate-keys@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-for-of@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-modules-amd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154"
+  dependencies:
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-commonjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe"
+  dependencies:
+    babel-plugin-transform-strict-mode "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-modules-systemjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-umd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468"
+  dependencies:
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-object-super@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d"
+  dependencies:
+    babel-helper-replace-supers "^6.24.1"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-parameters@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b"
+  dependencies:
+    babel-helper-call-delegate "^6.24.1"
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-shorthand-properties@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-spread@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-sticky-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-template-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-typeof-symbol@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-unicode-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    regexpu-core "^2.0.0"
+
+babel-plugin-transform-regenerator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418"
+  dependencies:
+    regenerator-transform "0.9.11"
+
+babel-plugin-transform-strict-mode@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babel-preset-es2015@^6.18.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz#d44050d6bc2c9feea702aaf38d727a0210538939"
+  dependencies:
+    babel-plugin-check-es2015-constants "^6.22.0"
+    babel-plugin-transform-es2015-arrow-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoped-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoping "^6.24.1"
+    babel-plugin-transform-es2015-classes "^6.24.1"
+    babel-plugin-transform-es2015-computed-properties "^6.24.1"
+    babel-plugin-transform-es2015-destructuring "^6.22.0"
+    babel-plugin-transform-es2015-duplicate-keys "^6.24.1"
+    babel-plugin-transform-es2015-for-of "^6.22.0"
+    babel-plugin-transform-es2015-function-name "^6.24.1"
+    babel-plugin-transform-es2015-literals "^6.22.0"
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-plugin-transform-es2015-modules-systemjs "^6.24.1"
+    babel-plugin-transform-es2015-modules-umd "^6.24.1"
+    babel-plugin-transform-es2015-object-super "^6.24.1"
+    babel-plugin-transform-es2015-parameters "^6.24.1"
+    babel-plugin-transform-es2015-shorthand-properties "^6.24.1"
+    babel-plugin-transform-es2015-spread "^6.22.0"
+    babel-plugin-transform-es2015-sticky-regex "^6.24.1"
+    babel-plugin-transform-es2015-template-literals "^6.22.0"
+    babel-plugin-transform-es2015-typeof-symbol "^6.22.0"
+    babel-plugin-transform-es2015-unicode-regex "^6.24.1"
+    babel-plugin-transform-regenerator "^6.24.1"
+
+babel-register@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
+  dependencies:
+    babel-core "^6.24.1"
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    home-or-tmp "^2.0.0"
+    lodash "^4.2.0"
+    mkdirp "^0.5.1"
+    source-map-support "^0.4.2"
+
+babel-runtime@^6.18.0, babel-runtime@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b"
+  dependencies:
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-template@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.24.1.tgz#04ae514f1f93b3a2537f2a0f60a5a45fb8308333"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    lodash "^4.2.0"
+
+babel-traverse@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.24.1.tgz#ab36673fd356f9a0948659e7b338d5feadb31695"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    babylon "^6.15.0"
+    debug "^2.2.0"
+    globals "^9.0.0"
+    invariant "^2.2.0"
+    lodash "^4.2.0"
+
+babel-types@^6.19.0, babel-types@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.24.1.tgz#a136879dc15b3606bda0d90c1fc74304c2ff0975"
+  dependencies:
+    babel-runtime "^6.22.0"
+    esutils "^2.0.2"
+    lodash "^4.2.0"
+    to-fast-properties "^1.0.1"
+
+babel@^6.5.2:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel/-/babel-6.23.0.tgz#d0d1e7d803e974765beea3232d4e153c0efb90f4"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+babylon@^6.11.0, babylon@^6.15.0:
+  version "6.17.0"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.0.tgz#37da948878488b9c4e3c4038893fa3314b3fc932"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6"
+
+batch@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/batch/-/batch-0.2.1.tgz#4463997bb4d5fd1c7a011548813e52aa189c2c79"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary-extensions@^1.0.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.8.0.tgz#48ec8d16df4377eae5fa5884682480af4d95c774"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.3.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@^1.16.1:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+boom@0.4.x:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-0.4.2.tgz#7a636e9ded4efcefb19cef4947a3c67dfaee911b"
+  dependencies:
+    hoek "0.9.x"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-0.1.5.tgz#c085711085291d8b75fdd74eab0f8597280711e6"
+  dependencies:
+    expand-range "^0.1.0"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+brunch@1.7.20:
+  version "1.7.20"
+  resolved "https://registry.yarnpkg.com/brunch/-/brunch-1.7.20.tgz#0d755255b2bc695b14d45742a176c2b1fc9d88c6"
+  dependencies:
+    anymatch "~1.0.0"
+    anysort "~1.0.0"
+    async-each "~0.1.2"
+    async-waterfall "~0.1.2"
+    chokidar "~0.12.0"
+    coffee-script "~1.8.0"
+    commander "~2.0.0"
+    commonjs-require-definition "~0.1.0"
+    debug "~0.7.2"
+    init-skeleton "~0.2.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    pushserve "~0.1.6"
+    read-components "~0.6.0"
+    source-map "~0.1.35"
+
+buffer-crc32@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.1.tgz#be3e5382fc02b6d6324956ac1af98aa98b08534c"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+buster-core@=0.6.4:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/buster-core/-/buster-core-0.6.4.tgz#27bf6bad674244ea720f311d900a0ca1cb786050"
+
+buster-format@~0.5:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/buster-format/-/buster-format-0.5.6.tgz#2b86c322ecf5e1b0ae6e6e7905ebfcf387d2ab95"
+  dependencies:
+    buster-core "=0.6.4"
+
+bytes@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-0.2.0.tgz#aad33ec14e3dc2ca74e8e7d451f9ba053ad4f7a0"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chai@~3.5.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/chai/-/chai-3.5.0.tgz#4d02637b067fe958bdbfdd3a40ec56fef7373247"
+  dependencies:
+    assertion-error "^1.0.1"
+    deep-eql "^0.1.3"
+    type-detect "^1.0.0"
+
+chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chokidar@^1.4.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.6.1.tgz#2f4447ab5e96e50fb3d789fd90d4c72e0e4c70c2"
+  dependencies:
+    anymatch "^1.3.0"
+    async-each "^1.0.0"
+    glob-parent "^2.0.0"
+    inherits "^2.0.1"
+    is-binary-path "^1.0.0"
+    is-glob "^2.0.0"
+    path-is-absolute "^1.0.0"
+    readdirp "^2.0.0"
+  optionalDependencies:
+    fsevents "^1.0.0"
+
+chokidar@~0.12.0:
+  version "0.12.6"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-0.12.6.tgz#be204f5b9634e009311256e5d6e8e0e508284d2f"
+  dependencies:
+    async-each "~0.1.5"
+    readdirp "~1.3.0"
+  optionalDependencies:
+    fsevents "~0.3.1"
+
+"clean-css-brunch@>= 1.0 < 1.5":
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/clean-css-brunch/-/clean-css-brunch-1.4.0.tgz#7df42f1c2bc7de4eb9db1931c96061ced54e0bd9"
+  dependencies:
+    clean-css "0.8.x"
+    coffee-script "1.3.3"
+
+clean-css@0.8.x:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-0.8.3.tgz#13bca09d4aaa8b0e73e4805530e8ba3dc76c22d0"
+  dependencies:
+    optimist "0.3.x"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+coffee-script@1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.3.3.tgz#150d6b4cb522894369efed6a2101c20bc7f4a4f4"
+
+coffee-script@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.4.0.tgz#5e3bc8aac26c01a8e27bf107722c5655f5ad7d36"
+
+coffee-script@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.8.0.tgz#9c9f1d2b4a52a000ded15b659791703648263c1d"
+  dependencies:
+    mkdirp "~0.3.5"
+
+colors@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63"
+
+combine-lists@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/combine-lists/-/combine-lists-1.0.1.tgz#458c07e09e0d900fc28b70a3fec2dacd1d2cb7f6"
+  dependencies:
+    lodash "^4.5.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-0.6.1.tgz#fa68a14f6a945d54dbbe50d8cdb3320e9e3b1a06"
+
+commander@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-1.2.0.tgz#fd5713bfa153c7d6cc599378a5ab4c45c535029e"
+  dependencies:
+    keypress "0.1.x"
+
+commander@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.3.0.tgz#fd430e889832ec353b9acd1de217c11cb3eef873"
+
+commander@^2.5.0, commander@^2.8.1, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.0.0.tgz#d1b86f901f8b64bd941bdeadaf924530393be928"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+commonjs-require-definition@~0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/commonjs-require-definition/-/commonjs-require-definition-0.1.2.tgz#93720e42b3383a00e4097f6a4a979f10f376dc2d"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-builder@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/component-builder/-/component-builder-0.10.1.tgz#d29c7ab70241a678e3f8dbea4d1759c68b6f8f9b"
+  dependencies:
+    batch "0.2.1"
+    component-require "0.3.1"
+    cp "~0.1.0"
+    debug "*"
+    mkdirp "0.3.4"
+    string-to-js "0.0.1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+component-require@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/component-require/-/component-require-0.3.1.tgz#50a00e2e2cb0fe273ab4268fe20ae4804f35fe6d"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+config-chain@~1.1.1:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+connect-slashes@~0.0.9:
+  version "0.0.11"
+  resolved "https://registry.yarnpkg.com/connect-slashes/-/connect-slashes-0.0.11.tgz#4b44efae7599cc03ee20b24e9287272f41d62258"
+
+connect@1.x:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-1.9.2.tgz#42880a22e9438ae59a8add74e437f58ae8e52807"
+  dependencies:
+    formidable "1.0.x"
+    mime ">= 0.0.1"
+    qs ">= 0.4.0"
+
+connect@2.8.8:
+  version "2.8.8"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-2.8.8.tgz#b9abf8caf0bd9773cb3dea29344119872582446d"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    bytes "0.2.0"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    formidable "1.0.14"
+    fresh "0.2.0"
+    methods "0.0.1"
+    pause "0.0.1"
+    qs "0.6.5"
+    send "0.1.4"
+    uid2 "0.0.2"
+
+connect@^3.6.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+content-type-parser@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/content-type-parser/-/content-type-parser-1.0.1.tgz#c3e56988c53c65127fb46d4032a3a900246fdc94"
+
+content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.1.tgz#44e072148af01e6e8e24afbf12690d68ae698ecb"
+
+cookie@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.1.0.tgz#90eb469ddce905c866de687efc43131d8801f9d0"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-js@^2.2.0, core-js@^2.4.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cp@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/cp/-/cp-0.1.1.tgz#3946a76c1a53ffe0e68593f341c124b336c1f06d"
+
+cryptiles@0.2.x:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-0.2.2.tgz#ed91ff1f17ad13d3748288594f8a48a0d26f325c"
+  dependencies:
+    boom "0.4.x"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+"css-brunch@>= 1.0 < 1.5":
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/css-brunch/-/css-brunch-1.3.0.tgz#82b07ee0ea8887e5f97d8ee6b9eda8abb939644b"
+  dependencies:
+    coffee-script "1.3.3"
+
+cssom@0.2.x:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.2.5.tgz#2682709b5902e7212df529116ff788cd5b254894"
+
+cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.2.tgz#b8036170c79f07a90ff2f16e22284027a243848b"
+
+cssstyle@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.3.tgz#fc6a0cb8c72b99f10b17d2028adcdb9bcc73cb49"
+  dependencies:
+    cssom "0.2.x"
+
+"cssstyle@>= 0.2.37 < 0.3.0":
+  version "0.2.37"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54"
+  dependencies:
+    cssom "0.3.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+custom-event@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/custom-event/-/custom-event-1.0.1.tgz#5d02a46850adf1b4a317946a3928fccb5bfd0425"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-utils@~1.2.17:
+  version "1.2.21"
+  resolved "https://registry.yarnpkg.com/date-utils/-/date-utils-1.2.21.tgz#61fb16cdc1274b3c9acaaffe9fc69df8720a2b64"
+
+dateformat@~1.0.6:
+  version "1.0.12"
+  resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-1.0.12.tgz#9f124b67594c937ff706932e4a642cca8dbbfee9"
+  dependencies:
+    get-stdin "^4.0.1"
+    meow "^3.3.0"
+
+debug@*, debug@2.6.3, debug@^2.2.0:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@0.7.4, debug@~0.7.2:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@2.2.0, debug@^2.1.1:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+decamelize@^1.0.0, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+deep-eql@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-0.1.3.tgz#ef558acab8de25206cd713906d74e56930eb69f2"
+  dependencies:
+    type-detect "0.1.1"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+deep-is@~0.1.2, deep-is@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detect-indent@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208"
+  dependencies:
+    repeating "^2.0.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+di@^0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c"
+
+diff@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serialize@^2.2.0:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b"
+  dependencies:
+    custom-event "~1.0.0"
+    ent "~2.2.0"
+    extend "^3.0.0"
+    void-elements "^2.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-precompile-brunch@^0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-precompile-brunch/-/ember-precompile-brunch-0.1.2.tgz#54e73d5054a35afde79aa1c1b454f6ed0e37611c"
+  dependencies:
+    coffee-script "1.4.0"
+    jsdom "^9.0.0"
+
+ember-radio-button@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-radio-button/-/ember-radio-button-0.1.2.tgz#6cbf20977ec2ea741ba361e5f4295b1d804b7c19"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.3.tgz#1798ed93451246453d4c6f635d7a201fe940d5ab"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.2"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a"
+  dependencies:
+    after "0.8.2"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.7"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.3.tgz#8de7f97895d20d39b85f88eeee777b2bd42b13d4"
+  dependencies:
+    accepts "1.3.3"
+    base64id "1.0.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    ws "1.1.2"
+
+ent@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ent/-/ent-2.2.0.tgz#e964219325a21d05f44466a2f686ed6ce5f5dd1d"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es6-promise@~4.0.3:
+  version "4.0.5"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.0.5.tgz#7882f30adde5b240ccfa7f7d78c548330951ae42"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@1.0.2, escape-string-regexp@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1"
+
+escodegen@1.7.x:
+  version "1.7.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.7.1.tgz#30ecfcf66ca98dc67cd2fd162abeb6eafa8ce6fc"
+  dependencies:
+    esprima "^1.2.2"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.5.0"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+escodegen@^1.6.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018"
+  dependencies:
+    esprima "^2.7.1"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.8.1"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@1.2.x, esprima@^1.2.2:
+  version "1.2.5"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.2.5.tgz#0993502feaf668138325756f30f9a51feeec11e9"
+
+esprima@2.5.x:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.5.0.tgz#f387a46fd344c1b1a39baf8c20bfb43b6d0058cc"
+
+esprima@^2.6.0, esprima@^2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+estraverse@^1.9.1:
+  version "1.9.3"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44"
+
+estraverse@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.8.0.tgz#3f1264fb62c8500dbae5e4f73705cd576d6af428"
+
+esutils@^2.0.0, esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+expand-braces@^0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea"
+  dependencies:
+    array-slice "^0.2.3"
+    array-unique "^0.2.1"
+    braces "^0.1.2"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-0.1.1.tgz#4cb8eda0993ca56fa4f41fc42f3cbb4ccadff044"
+  dependencies:
+    is-number "^0.1.1"
+    repeat-string "^0.2.2"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@2.5.8:
+  version "2.5.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-2.5.8.tgz#f166b55d4e8c6d2307ef88ad1768209613f7452a"
+  dependencies:
+    connect "1.x"
+    mime "1.2.4"
+    mkdirp "0.3.0"
+    qs "0.4.x"
+
+express@~3.3.0:
+  version "3.3.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-3.3.8.tgz#8e98ac30d81f4c95b85d71d2af6cf84f62ef19bd"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    commander "1.2.0"
+    connect "2.8.8"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    fresh "0.2.0"
+    methods "0.0.1"
+    mkdirp "0.3.5"
+    range-parser "0.0.4"
+    send "0.1.4"
+
+extend@^3.0.0, extend@~3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-levenshtein@~1.0.0:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-1.0.7.tgz#0178dcdee023b92905193af0959e8a7639cfdcb9"
+
+fast-levenshtein@~2.0.4:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filename-regex@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775"
+
+fileset@0.1.x:
+  version "0.1.8"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.1.8.tgz#506b91a9396eaa7e32fb42a84077c7a0c736b741"
+  dependencies:
+    glob "3.x"
+    minimatch "0.x"
+
+fileset@0.2.x:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.2.1.tgz#588ef8973c6623b2a76df465105696b96aac8067"
+  dependencies:
+    glob "5.x"
+    minimatch "2.x"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.1.0:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.1.4.tgz#91abd788aba9702b1aabfa8bc01031a2ac9e3b12"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime "~1.2.11"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+formidable@1.0.14:
+  version "1.0.14"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.14.tgz#2b3f4c411cbb5fdd695c44843e2a23514a43231a"
+
+formidable@1.0.x:
+  version "1.0.17"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.17.tgz#ef5491490f9433b705faa77249c99029ae348559"
+
+fresh@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.2.0.tgz#bfd9402cf3df12c4a4c310c79f99a3dde13d34a7"
+
+fs-extra@~0.26.4:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@~0.6.3:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.6.4.tgz#f46f0c75b7841f8d200b3348cd4d691d5a099d15"
+  dependencies:
+    jsonfile "~1.0.1"
+    mkdirp "0.3.x"
+    ncp "~0.4.2"
+    rimraf "~2.2.0"
+
+fs-extra@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-1.0.0.tgz#cd3ce5f7e7cb6145883fcae3191e9877f8587950"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fsevents@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.1.1.tgz#f19fd28f43eeaf761680e519a203c4d0b3d31aff"
+  dependencies:
+    nan "^2.3.0"
+    node-pre-gyp "^0.6.29"
+
+fsevents@~0.3.1:
+  version "0.3.8"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-0.3.8.tgz#9992f1032c925c829554d0d59801dca0313a5356"
+  dependencies:
+    nan "^2.0.2"
+
+fstream-ignore@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.3.tgz#1c23855f962f17b3ad3d0dc7443f304542edfe09"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6"
+  dependencies:
+    assert-plus "^1.0.0"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob@3.2.11, glob@3.x, glob@~3.2.3:
+  version "3.2.11"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
+  dependencies:
+    inherits "2"
+    minimatch "0.3"
+
+glob@5.x, glob@^5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globals@^9.0.0:
+  version "9.17.0"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-9.17.0.tgz#0c0ca696d9b9bb694d2e5470bd37777caad50286"
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@1.9.2:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f"
+
+growl@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.8.1.tgz#4b2dec8d907e93db336624dcec0183502f8c9428"
+
+handlebars@^4.0.1:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2, har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-flag@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hasha@^2.2.0, hasha@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-1.0.0.tgz#b90bb169807285411da7ffcb8dd2598502d3b52d"
+  dependencies:
+    boom "0.4.x"
+    cryptiles "0.2.x"
+    hoek "0.9.x"
+    sntp "0.2.x"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hoek@0.9.x:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-0.9.1.tgz#3d322462badf07716ea7eb85baf88079cddce505"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+home-or-tmp@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.1"
+
+hosted-git-info@^2.1.4:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.4.2.tgz#0076b9f46a270506ddbaaea56496897460612a67"
+
+html-encoding-sniffer@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.1.tgz#79bf7a785ea495fe66165e734153f363ff5437da"
+  dependencies:
+    whatwg-encoding "^1.0.1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+ibrik@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ibrik/-/ibrik-2.0.0.tgz#89a2434f2a5c82b92166c3d97de3b5636eea2e9c"
+  dependencies:
+    coffee-script "~1.8.0"
+    esprima "1.2.x"
+    estraverse "~1.8.0"
+    fileset "0.1.x"
+    istanbul "~0.3.2"
+    lodash "~2.4.1"
+    mkdirp "~0.5.0"
+    optimist "~0.6.1"
+    which "~1.0.5"
+
+iconv-lite@0.4.13:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iconv-lite@0.4.15, iconv-lite@^0.4.5:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflight@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+inherits@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-1.0.2.tgz#ca4309dadee6b54cc0b8d247e8d7c7a0975bdc9b"
+
+ini@^1.3.4, ini@~1.3.0:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+ini@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.1.0.tgz#4e808c2ce144c6c1788918e034d6797bc6cf6281"
+
+init-skeleton@~0.2.0:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/init-skeleton/-/init-skeleton-0.2.4.tgz#82655737a62d3b3b0153371c5847209132171863"
+  dependencies:
+    commander "~2.0.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    rimraf "~2.2.1"
+
+invariant@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360"
+  dependencies:
+    loose-envify "^1.0.0"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-binary-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
+  dependencies:
+    binary-extensions "^1.0.0"
+
+is-buffer@^1.0.2:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istanbul@~0.3.0, istanbul@~0.3.2:
+  version "0.3.22"
+  resolved "https://registry.yarnpkg.com/istanbul/-/istanbul-0.3.22.tgz#3e164d85021fe19c985d1f0e7ef0c3e22d012eb6"
+  dependencies:
+    abbrev "1.0.x"
+    async "1.x"
+    escodegen "1.7.x"
+    esprima "2.5.x"
+    fileset "0.2.x"
+    handlebars "^4.0.1"
+    js-yaml "3.x"
+    mkdirp "0.5.x"
+    nopt "3.x"
+    once "1.x"
+    resolve "1.1.x"
+    supports-color "^3.1.0"
+    which "^1.1.1"
+    wordwrap "^1.0.0"
+
+jade@0.26.3:
+  version "0.26.3"
+  resolved "https://registry.yarnpkg.com/jade/-/jade-0.26.3.tgz#8f10d7977d8d79f2f6ff862a81b0513ccb25686c"
+  dependencies:
+    commander "0.6.1"
+    mkdirp "0.3.0"
+
+"javascript-brunch@>= 1.0 < 1.5":
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/javascript-brunch/-/javascript-brunch-1.3.0.tgz#c72067f5f6971c6e7e636a139e5fab8f88339b83"
+  dependencies:
+    coffee-script "1.3.3"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-tokens@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7"
+
+js-yaml@3.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsdom@^9.0.0:
+  version "9.12.0"
+  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-9.12.0.tgz#e8c546fffcb06c00d4833ca84410fed7f8a097d4"
+  dependencies:
+    abab "^1.0.3"
+    acorn "^4.0.4"
+    acorn-globals "^3.1.0"
+    array-equal "^1.0.0"
+    content-type-parser "^1.0.1"
+    cssom ">= 0.3.2 < 0.4.0"
+    cssstyle ">= 0.2.37 < 0.3.0"
+    escodegen "^1.6.1"
+    html-encoding-sniffer "^1.0.1"
+    nwmatcher ">= 1.3.9 < 2.0.0"
+    parse5 "^1.5.1"
+    request "^2.79.0"
+    sax "^1.2.1"
+    symbol-tree "^3.2.1"
+    tough-cookie "^2.3.2"
+    webidl-conversions "^4.0.0"
+    whatwg-encoding "^1.0.1"
+    whatwg-url "^4.3.0"
+    xml-name-validator "^2.0.1"
+
+jsesc@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+json5@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
+
+jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonfile@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-1.0.1.tgz#ea5efe40b83690b98667614a7392fc60e842c0dd"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+karma-babel-preprocessor@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/karma-babel-preprocessor/-/karma-babel-preprocessor-6.0.1.tgz#7ae1d3e64950dbe11f421b74040ab08fb5a66c21"
+  dependencies:
+    babel-core "^6.0.0"
+
+karma-chai@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/karma-chai/-/karma-chai-0.1.0.tgz#bee5ad40400517811ae34bb945f762909108b79a"
+
+karma-commonjs-require@~0.0.1:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/karma-commonjs-require/-/karma-commonjs-require-0.0.3.tgz#3b8426535b84b4635ec11fc238f5fe4b24ec0241"
+  dependencies:
+    commonjs-require-definition "~0.1.0"
+
+karma-coverage@~0.2.0:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/karma-coverage/-/karma-coverage-0.2.7.tgz#f76740b275bbf30a0ab9f41d8cf56843a0994576"
+  dependencies:
+    dateformat "~1.0.6"
+    ibrik "~2.0.0"
+    istanbul "~0.3.0"
+    minimatch "~0.3.0"
+
+karma-ember-precompiler-brunch@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/karma-ember-precompiler-brunch/-/karma-ember-precompiler-brunch-1.0.0.tgz#d7753a71c47d2de5f930f10da1262b602303b770"
+  dependencies:
+    jsdom "^9.0.0"
+
+karma-mocha@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/karma-mocha/-/karma-mocha-0.1.1.tgz#5edddb3e2d2c6fd47e9ad26f0af1595fe852c756"
+
+karma-phantomjs-launcher@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/karma-phantomjs-launcher/-/karma-phantomjs-launcher-1.0.2.tgz#19e1041498fd75563ed86730a22c1fe579fa8fb1"
+  dependencies:
+    lodash "^4.0.1"
+    phantomjs-prebuilt "^2.1.7"
+
+karma-sinon@~1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/karma-sinon/-/karma-sinon-1.0.5.tgz#4e3443f2830fdecff624d3747163f1217daa2a9a"
+
+karma@>=0.11.14:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/karma/-/karma-1.6.0.tgz#0e871d4527d5eac56c41d181f03c5c0a7e6dbf3e"
+  dependencies:
+    bluebird "^3.3.0"
+    body-parser "^1.16.1"
+    chokidar "^1.4.1"
+    colors "^1.1.0"
+    combine-lists "^1.0.0"
+    connect "^3.6.0"
+    core-js "^2.2.0"
+    di "^0.0.1"
+    dom-serialize "^2.2.0"
+    expand-braces "^0.1.1"
+    glob "^7.1.1"
+    graceful-fs "^4.1.2"
+    http-proxy "^1.13.0"
+    isbinaryfile "^3.0.0"
+    lodash "^3.8.0"
+    log4js "^0.6.31"
+    mime "^1.3.4"
+    minimatch "^3.0.2"
+    optimist "^0.6.1"
+    qjobs "^1.1.4"
+    range-parser "^1.2.0"
+    rimraf "^2.6.0"
+    safe-buffer "^5.0.1"
+    socket.io "1.7.3"
+    source-map "^0.5.3"
+    tmp "0.0.31"
+    useragent "^2.1.12"
+
+kew@~0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/kew/-/kew-0.1.7.tgz#0a32a817ff1a9b3b12b8c9bacf4bc4d679af8e72"
+
+kew@~0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/kew/-/kew-0.7.0.tgz#79d93d2d33363d6fdd2970b335d9141ad591d79b"
+
+keypress@0.1.x:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/keypress/-/keypress-0.1.0.tgz#4a3188d4291b66b4f65edb99f806aa9ae293592a"
+
+kind-of@^3.0.2:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.1.0.tgz#475d698a5e49ff5e53d14e3e732429dc8bf4cf47"
+  dependencies:
+    is-buffer "^1.0.2"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+"less-brunch@>= 1.0 < 1.5":
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/less-brunch/-/less-brunch-1.4.1.tgz#f6cc957280187b4008ccb829089d0f0fcdb08a6d"
+  dependencies:
+    coffee-script "1.3.3"
+    less "1.3.x"
+
+less@1.3.x:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/less/-/less-1.3.3.tgz#7ee8f300a41080f3544c80c7a70cdf6a61280cf9"
+  optionalDependencies:
+    ycssmin ">=1.0.1"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+levn@~0.2.5:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/levn/-/levn-0.2.5.tgz#ba8d339d0ca4a610e3a3f145b9caf48807155054"
+  dependencies:
+    prelude-ls "~1.1.0"
+    type-check "~0.3.1"
+
+levn@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee"
+  dependencies:
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+
+load-json-file@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    parse-json "^2.2.0"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+    strip-bom "^2.0.0"
+
+lodash@^3.10.0, lodash@^3.8.0, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.1, lodash@^4.14.0, lodash@^4.2.0, lodash@^4.5.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+log4js@^0.6.31:
+  version "0.6.38"
+  resolved "https://registry.yarnpkg.com/log4js/-/log4js-0.6.38.tgz#2c494116695d6fb25480943d3fc872e662a522fd"
+  dependencies:
+    readable-stream "~1.0.2"
+    semver "~4.3.3"
+
+loggy@~0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/loggy/-/loggy-0.2.2.tgz#7edc85706a82d761ce9cef868f1afbad84165427"
+  dependencies:
+    ansi-color "~0.2.1"
+    date-utils "~1.2.17"
+    growl "~1.8.1"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loose-envify@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848"
+  dependencies:
+    js-tokens "^3.0.0"
+
+loud-rejection@^1.0.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f"
+  dependencies:
+    currently-unhandled "^0.4.1"
+    signal-exit "^3.0.0"
+
+lru-cache@2, lru-cache@2.2.x:
+  version "2.2.4"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d"
+
+map-obj@^1.0.0, map-obj@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+meow@^3.3.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb"
+  dependencies:
+    camelcase-keys "^2.0.0"
+    decamelize "^1.1.2"
+    loud-rejection "^1.0.0"
+    map-obj "^1.0.1"
+    minimist "^1.1.3"
+    normalize-package-data "^2.3.4"
+    object-assign "^4.0.1"
+    read-pkg-up "^1.0.1"
+    redent "^1.0.0"
+    trim-newlines "^1.0.0"
+
+methods@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-0.0.1.tgz#277c90f8bef39709645a8371c51c3b6c648e068c"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime@1.2.4:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.4.tgz#11b5fdaf29c2509255176b80ad520294f5de92b7"
+
+"mime@>= 0.0.1", mime@^1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+mime@~1.2.11, mime@~1.2.9:
+  version "1.2.11"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10"
+
+minimatch@0.3, minimatch@0.x, minimatch@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.3.0.tgz#275d8edaac4f1bb3326472089e7949c8394699dd"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@2.x, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@~0.2.12:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimatch@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.3, minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e"
+
+mkdirp@0.3.4:
+  version "0.3.4"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.4.tgz#f8c81d213b7299a031f193a57d752a17d2f6c7d8"
+
+mkdirp@0.3.5, mkdirp@0.3.x, mkdirp@~0.3.3, mkdirp@~0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@0.5.1, mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mocha-phantomjs-core@^1.1.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/mocha-phantomjs-core/-/mocha-phantomjs-core-1.3.1.tgz#586538c8d71fa8de90c41a46acc0481c1fb83e18"
+
+mocha-phantomjs-core@~2.1.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/mocha-phantomjs-core/-/mocha-phantomjs-core-2.1.1.tgz#83b1400b437209e0c710f0a17a0485a155179fe2"
+
+mocha-phantomjs@~4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/mocha-phantomjs/-/mocha-phantomjs-4.1.0.tgz#c75e16612e1a6af0ad8d281e3a2fef49d55e505b"
+  dependencies:
+    commander "^2.8.1"
+    mocha-phantomjs-core "^1.1.0"
+    phantomjs "1.9.7-15"
+
+mocha@2.5.3:
+  version "2.5.3"
+  resolved "https://registry.yarnpkg.com/mocha/-/mocha-2.5.3.tgz#161be5bdeb496771eb9b35745050b622b5aefc58"
+  dependencies:
+    commander "2.3.0"
+    debug "2.2.0"
+    diff "1.4.0"
+    escape-string-regexp "1.0.2"
+    glob "3.2.11"
+    growl "1.9.2"
+    jade "0.26.3"
+    mkdirp "0.5.1"
+    supports-color "1.2.0"
+    to-iso-string "0.0.2"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+nan@^2.0.2, nan@^2.3.0:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+ncp@0.4.2, ncp@~0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/ncp/-/ncp-0.4.2.tgz#abcc6cbd3ec2ed2a729ff6e7c1fa8f01784a8574"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+node-pre-gyp@^0.6.29:
+  version "0.6.34"
+  resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.34.tgz#94ad1c798a11d7fc67381b50d47f8cc18d9799f7"
+  dependencies:
+    mkdirp "^0.5.1"
+    nopt "^4.0.1"
+    npmlog "^4.0.2"
+    rc "^1.1.7"
+    request "^2.81.0"
+    rimraf "^2.6.1"
+    semver "^5.3.0"
+    tar "^2.2.1"
+    tar-pack "^3.4.0"
+
+node-uuid@~1.4.0, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+nopt@2:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-2.2.1.tgz#2aa09b7d1768487b3b89a9c5aa52335bff0baea7"
+  dependencies:
+    abbrev "1"
+
+nopt@3.x:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+nopt@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d"
+  dependencies:
+    abbrev "1"
+    osenv "^0.1.4"
+
+normalize-package-data@^2.3.2, normalize-package-data@^2.3.4:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npmconf@0.0.24:
+  version "0.0.24"
+  resolved "https://registry.yarnpkg.com/npmconf/-/npmconf-0.0.24.tgz#b78875b088ccc3c0afa3eceb3ce3244b1b52390c"
+  dependencies:
+    config-chain "~1.1.1"
+    inherits "~1.0.0"
+    ini "~1.1.0"
+    mkdirp "~0.3.3"
+    nopt "2"
+    once "~1.1.1"
+    osenv "0.0.3"
+    semver "~1.1.0"
+
+npmlog@^4.0.2:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+"nwmatcher@>= 1.3.9 < 2.0.0":
+  version "1.3.9"
+  resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.3.9.tgz#8bab486ff7fa3dfd086656bbe8b17116d3692d2a"
+
+oauth-sign@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.3.0.tgz#cb540f93bb2b22a7d5941691a288d60e8ea9386e"
+
+oauth-sign@~0.8.0, oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0, object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+once@1.x, once@^1.3.0, once@^1.3.3:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.1.1.tgz#9db574933ccb08c3a7614d154032c09ea6f339e7"
+
+optimist@0.3.x:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+optimist@^0.6.1, optimist@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+optionator@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.5.0.tgz#b75a8995a2d417df25b6e4e3862f50aa88651368"
+  dependencies:
+    deep-is "~0.1.2"
+    fast-levenshtein "~1.0.0"
+    levn "~0.2.5"
+    prelude-ls "~1.1.1"
+    type-check "~0.3.1"
+    wordwrap "~0.0.2"
+
+optionator@^0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64"
+  dependencies:
+    deep-is "~0.1.3"
+    fast-levenshtein "~2.0.4"
+    levn "~0.3.0"
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+    wordwrap "~1.0.0"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.0.3.tgz#cd6ad8ddb290915ad9e22765576025d411f29cb6"
+
+osenv@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parse5@^1.5.1:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/parse5/-/parse5-1.5.1.tgz#9b7f3b0de32be78dc2401b17573ccaf0f6f59d94"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-exists@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
+  dependencies:
+    pinkie-promise "^2.0.0"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-type@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
+  dependencies:
+    graceful-fs "^4.1.2"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+pause@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/pause/-/pause-0.0.1.tgz#1d408b3fdb76923b9543d96fb4c9dfd535d9cb5d"
+
+pend@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+phantomjs-prebuilt@^2.1.7:
+  version "2.1.14"
+  resolved "https://registry.yarnpkg.com/phantomjs-prebuilt/-/phantomjs-prebuilt-2.1.14.tgz#d53d311fcfb7d1d08ddb24014558f1188c516da0"
+  dependencies:
+    es6-promise "~4.0.3"
+    extract-zip "~1.5.0"
+    fs-extra "~1.0.0"
+    hasha "~2.2.0"
+    kew "~0.7.0"
+    progress "~1.1.8"
+    request "~2.79.0"
+    request-progress "~2.0.1"
+    which "~1.2.10"
+
+phantomjs@1.9.7-15:
+  version "1.9.7-15"
+  resolved "https://registry.yarnpkg.com/phantomjs/-/phantomjs-1.9.7-15.tgz#0b3a7ce630486a83be91ff4e832eee20e971115b"
+  dependencies:
+    adm-zip "0.2.1"
+    kew "~0.1.7"
+    mkdirp "0.3.5"
+    ncp "0.4.2"
+    npmconf "0.0.24"
+    progress "^1.1.5"
+    request "2.36.0"
+    request-progress "^0.3.1"
+    rimraf "~2.2.2"
+    which "~1.0.5"
+
+phantomjs@~2.1.0:
+  version "2.1.7+deprecated"
+  resolved "https://registry.yarnpkg.com/phantomjs/-/phantomjs-2.1.7.tgz#c6910f67935c37285b6114329fc2f27d5f3e3134"
+  dependencies:
+    extract-zip "~1.5.0"
+    fs-extra "~0.26.4"
+    hasha "^2.2.0"
+    kew "~0.7.0"
+    progress "~1.1.8"
+    request "~2.67.0"
+    request-progress "~2.0.1"
+    which "~1.2.2"
+
+pify@^2.0.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+prelude-ls@~1.1.0, prelude-ls@~1.1.1, prelude-ls@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+progress@^1.1.5, progress@~1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/progress/-/progress-1.1.8.tgz#e260c78f6161cdd9b0e56cc3e0a85de17c7a57be"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+pushserve@~0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/pushserve/-/pushserve-0.1.6.tgz#a07b173fc2488b71d9af4a5b37411bb3d91d6e27"
+  dependencies:
+    commander "~2.0.0"
+    connect-slashes "~0.0.9"
+    express "~3.3.0"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+qjobs@^1.1.4:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/qjobs/-/qjobs-1.1.5.tgz#659de9f2cf8dcc27a1481276f205377272382e73"
+
+qs@0.4.x, "qs@>= 0.4.0":
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.4.2.tgz#3cac4c861e371a8c9c4770ac23cda8de639b8e5f"
+
+qs@0.6.5, qs@~0.6.0:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.6.5.tgz#294b268e4b0d4250f6dde19b3b8b34935dff14ef"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+qs@~6.3.0:
+  version "6.3.2"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.2.tgz#e75bd5f6e268122a2a0e0bda630b2550c166502c"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-0.0.4.tgz#c0427ffef51c10acba0782a46c9602e744ff620b"
+
+range-parser@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.2.0.tgz#994976cf6a5096a41162840492f0bdc5d6e7fb96"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.15"
+    unpipe "1.0.0"
+
+rc@^1.1.7:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-components@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/read-components/-/read-components-0.6.1.tgz#45752f1c7c7e450742f4085fe6e24fccc5c75720"
+  dependencies:
+    async-each "~0.1.3"
+    component-builder "~0.10.0"
+
+read-pkg-up@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02"
+  dependencies:
+    find-up "^1.0.0"
+    read-pkg "^1.0.0"
+
+read-pkg@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28"
+  dependencies:
+    load-json-file "^1.0.0"
+    normalize-package-data "^2.3.2"
+    path-type "^1.0.0"
+
+"readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.1.4:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@^2.0.2, readable-stream@~2.0.0, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2, readable-stream@~1.0.26-2:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdirp@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78"
+  dependencies:
+    graceful-fs "^4.1.2"
+    minimatch "^3.0.2"
+    readable-stream "^2.0.2"
+    set-immediate-shim "^1.0.1"
+
+readdirp@~1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-1.3.0.tgz#eaf1a9b463be9a8190fc9ae163aa1ac934aa340b"
+  dependencies:
+    graceful-fs "~2.0.0"
+    minimatch "~0.2.12"
+    readable-stream "~1.0.26-2"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redent@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde"
+  dependencies:
+    indent-string "^2.1.0"
+    strip-indent "^1.0.1"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator-runtime@^0.10.0:
+  version "0.10.3"
+  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.3.tgz#8c4367a904b51ea62a908ac310bf99ff90a82a3e"
+
+regenerator-transform@0.9.11:
+  version "0.9.11"
+  resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.11.tgz#3a7d067520cb7b7176769eb5ff868691befe1283"
+  dependencies:
+    babel-runtime "^6.18.0"
+    babel-types "^6.19.0"
+    private "^0.1.6"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu-core@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240"
+  dependencies:
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-0.2.2.tgz#c7a8d3236068362059a7e4651fc6884e8b1fb4ae"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request-progress@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-0.3.1.tgz#0721c105d8a96ac6b2ce8b2c89ae2d5ecfcf6b3a"
+  dependencies:
+    throttleit "~0.0.2"
+
+request-progress@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-2.0.1.tgz#5d36bb57961c673aa5b788dbc8141fdf23b44e08"
+  dependencies:
+    throttleit "^1.0.0"
+
+request@2.36.0:
+  version "2.36.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.36.0.tgz#28c6c04262c7b9ffdd21b9255374517ee6d943f5"
+  dependencies:
+    forever-agent "~0.5.0"
+    json-stringify-safe "~5.0.0"
+    mime "~1.2.9"
+    node-uuid "~1.4.0"
+    qs "~0.6.0"
+  optionalDependencies:
+    aws-sign2 "~0.5.0"
+    form-data "~0.1.0"
+    hawk "~1.0.0"
+    http-signature "~0.10.0"
+    oauth-sign "~0.3.0"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@^2.79.0, request@^2.81.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@~2.67.0:
+  version "2.67.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.67.0.tgz#8af74780e2bf11ea0ae9aa965c11f11afd272742"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+request@~2.79.0:
+  version "2.79.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.79.0.tgz#4dfe5bf6be8b8cdc37fcf93e04b65577722710de"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~2.0.6"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    qs "~6.3.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "~0.4.1"
+    uuid "^3.0.0"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@1.1.x:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b"
+
+resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.6.0, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.0, rimraf@~2.2.1, rimraf@~2.2.2:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sax@^1.2.1:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.2.tgz#fd8631a23bc7826bef5d871bdb87378c95647828"
+
+"semver@2 || 3 || 4 || 5", semver@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@~1.1.0:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-1.1.4.tgz#2e5a4e72bab03472cc97f72753b4508912ef5540"
+
+semver@~4.3.3:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+send@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.1.4.tgz#be70d8d1be01de61821af13780b50345a4f71abd"
+  dependencies:
+    debug "*"
+    fresh "0.2.0"
+    mime "~1.2.9"
+    range-parser "0.0.4"
+
+set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+set-immediate-shim@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+sinon-chai@~2.8.0:
+  version "2.8.0"
+  resolved "https://registry.yarnpkg.com/sinon-chai/-/sinon-chai-2.8.0.tgz#432a9bbfd51a6fc00798f4d2526a829c060687ac"
+
+sinon@=1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/sinon/-/sinon-1.7.3.tgz#7a69d69cd0294586c743254eeff1b583a50997f2"
+  dependencies:
+    buster-format "~0.5"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+sntp@0.2.x:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-0.2.4.tgz#fb885f18b0f3aad189f824862536bceeec750900"
+  dependencies:
+    hoek "0.9.x"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.3.tgz#b30e86aa10d5ef3546601c09cde4765e381da377"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.3"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.3.tgz#b8af9caba00949e568e369f1327ea9be9ea2461b"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.3"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.7.3"
+    socket.io-parser "2.3.1"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-support@^0.4.2:
+  version "0.4.14"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.14.tgz#9d4463772598b86271b4f523f6c1f4e02a7d6aef"
+  dependencies:
+    source-map "^0.5.6"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.1.35:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d"
+  dependencies:
+    amdefine ">=0.0.4"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+"statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-to-js@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/string-to-js/-/string-to-js-0.0.1.tgz#bf153c760636faa30769b804a0195552ba7ad80f"
+
+string-width@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-indent@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2"
+  dependencies:
+    get-stdin "^4.0.1"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+supports-color@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.2.0.tgz#ff1ed1e61169d06b3cf2d588e188b18d8847e17e"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+supports-color@^3.1.0:
+  version "3.2.3"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6"
+  dependencies:
+    has-flag "^1.0.0"
+
+symbol-tree@^3.2.1:
+  version "3.2.2"
+  resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6"
+
+tar-pack@^3.4.0:
+  version "3.4.0"
+  resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.4.0.tgz#23be2d7f671a8339376cbdb0b8fe3fdebf317984"
+  dependencies:
+    debug "^2.2.0"
+    fstream "^1.0.10"
+    fstream-ignore "^1.0.5"
+    once "^1.3.3"
+    readable-stream "^2.1.4"
+    rimraf "^2.5.1"
+    tar "^2.2.1"
+    uid-number "^0.0.6"
+
+tar@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+throttleit@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-1.0.0.tgz#9e785836daf46743145a5984b6268d828528ac6c"
+
+throttleit@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-0.0.2.tgz#cfedf88e60c00dd9697b61fdd2a8343a9b680eaf"
+
+through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+tmp@0.0.31, tmp@0.0.x:
+  version "0.0.31"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.31.tgz#8f38ab9438e17315e5dbd8b3657e8bfb277ae4a7"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0, to-fast-properties@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+to-iso-string@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/to-iso-string/-/to-iso-string-0.0.2.tgz#4dc19e664dfccbe25bd8db508b00c6da158255d1"
+
+tough-cookie@>=0.12.0, tough-cookie@^2.3.2, tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+tr46@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
+
+trim-newlines@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613"
+
+trim-right@^1.0.0, trim-right@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.0, tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-check@~0.3.1, type-check@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72"
+  dependencies:
+    prelude-ls "~1.1.2"
+
+type-detect@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-0.1.1.tgz#0ba5ec2a885640e470ea4e8505971900dac58822"
+
+type-detect@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-1.0.0.tgz#762217cc06db258ec48908a1298e8b95121e8ea2"
+
+type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@~0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+"uglify-js-brunch@>= 1.0 < 1.5":
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/uglify-js-brunch/-/uglify-js-brunch-1.3.1.tgz#2d9faf463846cf499d3918147b3e328210d98fbf"
+  dependencies:
+    coffee-script "1.3.3"
+    uglify-js "1.3.2"
+
+uglify-js@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-1.3.2.tgz#7ed11c9dedf77f29632286ea4dc96a2229aef98e"
+
+uglify-js@^2.6:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+uid2@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/uid2/-/uid2-0.0.2.tgz#107fb155c82c1136620797ed4c88cf2b08f6aab8"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+useragent@^2.1.12:
+  version "2.1.13"
+  resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.1.13.tgz#bba43e8aa24d5ceb83c2937473e102e21df74c10"
+  dependencies:
+    lru-cache "2.2.x"
+    tmp "0.0.x"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+void-elements@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/void-elements/-/void-elements-2.0.1.tgz#c066afb582bb1cb4128d60ea92392e94d5e9dbec"
+
+webidl-conversions@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
+
+webidl-conversions@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.1.tgz#8015a17ab83e7e1b311638486ace81da6ce206a0"
+
+whatwg-encoding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.1.tgz#3c6c451a198ee7aec55b1ec61d0920c67801a5f4"
+  dependencies:
+    iconv-lite "0.4.13"
+
+whatwg-url@^4.3.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-4.7.0.tgz#202035ac1955b087cdd20fa8b58ded3ab1cd2af5"
+  dependencies:
+    tr46 "~0.0.3"
+    webidl-conversions "^3.0.0"
+
+which@^1.1.1, which@~1.2.10, which@~1.2.2:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+which@~1.0.5:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.0.9.tgz#460c1da0f810103d0321a9b633af9e575e64486f"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@^1.0.0, wordwrap@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrappy@1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+ws@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.2.tgz#8a244fa052401e08c9886cf44a85189e1fd4067f"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xml-name-validator@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-2.0.1.tgz#4d8b8f1eccd3419aa362061becef515e1e559635"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yauzl@2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.4.1.tgz#9528f442dab1b2284e58b4379bb194e22e0c4005"
+  dependencies:
+    fd-slicer "~1.0.1"
+
+ycssmin@>=1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ycssmin/-/ycssmin-1.0.1.tgz#7cdde8db78cfab00d2901c3b2301e304faf4df16"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
index 6a70675..8cc876f 100644
--- a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
+++ b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
@@ -135,6 +135,8 @@
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 #hadoop params
 
 if has_namenode or dfs_type == 'HCFS':
diff --git a/contrib/views/capacity-scheduler/pom.xml b/contrib/views/capacity-scheduler/pom.xml
index 7a65ffb..1bc1861 100644
--- a/contrib/views/capacity-scheduler/pom.xml
+++ b/contrib/views/capacity-scheduler/pom.xml
@@ -120,16 +120,22 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>${ui.directory}</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <goals>
-                <goal>install-node-and-npm</goal>
+                <goal>install-node-and-yarn</goal>
             </goals>
             <!-- optional: default phase is "generate-resources" -->
             <phase>initialize</phase>
@@ -139,9 +145,9 @@
             </configuration>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install --pure-lockfile</id>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <phase>generate-resources</phase>
             <configuration>
@@ -156,22 +162,6 @@
         <version>1.2.1</version>
         <executions>
           <execution>
-            <id>node gyp executable</id>
-            <phase>initialize</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <skip>${skip.nodegyp.chmod}</skip>
-              <workingDirectory>${ui.directory}</workingDirectory>
-              <executable>chmod</executable>
-              <arguments>
-                <argument>+x</argument>
-                <argument>${ui.directory}/node/node_modules/npm/bin/node-gyp-bin/node-gyp</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
             <id>Bower install</id>
             <phase>generate-resources</phase>
             <goals>
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
index b6c419a..0646b79 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/capacityInput.js
@@ -257,6 +257,8 @@
       } else {
         this.set('value', (parseFloat(val) > maxVal)? parseFloat(maxVal) : parseFloat(val));
       }
+    } else {
+      this.set('value', (!Em.isBlank(this.get('value')) && !isNaN(parseFloat(this.get('value')))) ? parseFloat(val) : null);
     }
   }.observes('value').on('change')
 });
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/yarn.lock b/contrib/views/capacity-scheduler/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..cf2a644
--- /dev/null
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/yarn.lock
@@ -0,0 +1,1374 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abab@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.3.tgz#b81de5f7274ec4e756d797cd834f303642724e5d"
+
+acorn-globals@^3.1.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-3.1.0.tgz#fd8270f71fbb4996b004fa880ee5d46573a731bf"
+  dependencies:
+    acorn "^4.0.4"
+
+acorn@^4.0.3, acorn@^4.0.4:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-color@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-color/-/ansi-color-0.2.1.tgz#3e75c037475217544ed763a8db5709fa9ae5bf9a"
+
+anymatch@^1.1.0, anymatch@~1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+anysort@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/anysort/-/anysort-1.0.1.tgz#341bd5d5ba1485f64e55ae865f1d45994b507fc4"
+  dependencies:
+    anymatch "~1.3.0"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+async-each@^0.1.5, async-each@~0.1.1, async-each@~0.1.2, async-each@~0.1.3, async-each@~0.1.4, async-each@~0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-0.1.6.tgz#b67e99edcddf96541e44af56290cd7d5c6e70439"
+
+async-waterfall@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/async-waterfall/-/async-waterfall-0.1.5.tgz#398bd48b0eac5d40ffbe400fe9e37a53ba966dae"
+
+async@~0.2.6:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+batch@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/batch/-/batch-0.2.1.tgz#4463997bb4d5fd1c7a011548813e52aa189c2c79"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+binary-extensions@^1.0.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.8.0.tgz#48ec8d16df4377eae5fa5884682480af4d95c774"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower@^1.2.8:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.8.0.tgz#55dbebef0ad9155382d9e9d3e497c1372345b44a"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+browser-resolve@~1.7.2:
+  version "1.7.2"
+  resolved "https://registry.yarnpkg.com/browser-resolve/-/browser-resolve-1.7.2.tgz#b401695b615697856529389ae2bc1d0fab197bf4"
+  dependencies:
+    resolve "1.1.5"
+
+brunch@^1.7.13:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/brunch/-/brunch-1.8.5.tgz#50e6536eee40159a3fa3b65c14e09bc83a47cc39"
+  dependencies:
+    anymatch "~1.3.0"
+    anysort "~1.0.0"
+    async-each "~0.1.2"
+    async-waterfall "~0.1.2"
+    chokidar "~1.0.1"
+    coffee-script "~1.9.0"
+    commander "~2.7.1"
+    commonjs-require-definition "~0.2.0"
+    debug "~2.1.1"
+    deppack "~0.1.0"
+    fcache "~0.1.0"
+    init-skeleton "~0.4.0"
+    loggy "~0.2.0"
+    mkdirp "~0.5.0"
+    pushserve "~0.1.6"
+    quickly-copy-file "~0.1.0"
+    read-components "~0.6.0"
+    source-map "~0.3.0"
+
+buffer-crc32@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.1.tgz#be3e5382fc02b6d6324956ac1af98aa98b08534c"
+
+bytes@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-0.2.0.tgz#aad33ec14e3dc2ca74e8e7d451f9ba053ad4f7a0"
+
+camelcase@^1.0.2:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+cbify@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/cbify/-/cbify-1.0.0.tgz#1a7ad4044b2f92317e7a1badda3a727607835d84"
+  dependencies:
+    fn-args "^1.0.0"
+    wrappy "^1.0.1"
+
+chokidar@~1.0.1:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.0.6.tgz#0a1c0bce1e24993afc105a5b81ea26dda01e23af"
+  dependencies:
+    anymatch "^1.1.0"
+    arrify "^1.0.0"
+    async-each "^0.1.5"
+    glob-parent "^1.0.0"
+    is-binary-path "^1.0.0"
+    is-glob "^1.1.3"
+    path-is-absolute "^1.0.0"
+    readdirp "^1.3.0"
+  optionalDependencies:
+    fsevents "^0.3.8"
+
+clean-css-brunch@^1.7.1:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/clean-css-brunch/-/clean-css-brunch-1.8.0.tgz#efea5ac3e2a99d08155c6c14e1eef466757b0984"
+  dependencies:
+    clean-css "^3.4.8"
+
+clean-css@^3.4.8:
+  version "3.4.25"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.25.tgz#9e9a52d5c1e6bc5123e1b2783fa65fe958946ede"
+  dependencies:
+    commander "2.8.x"
+    source-map "0.4.x"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+coffee-script@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.4.0.tgz#5e3bc8aac26c01a8e27bf107722c5655f5ad7d36"
+
+coffee-script@~1.9.0:
+  version "1.9.3"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.9.3.tgz#596e6e83fcfcb67c5964ab70d444beff0ac04ac7"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@1.2.0, commander@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-1.2.0.tgz#fd5713bfa153c7d6cc599378a5ab4c45c535029e"
+  dependencies:
+    keypress "0.1.x"
+
+commander@2.8.x:
+  version "2.8.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.0.0.tgz#d1b86f901f8b64bd941bdeadaf924530393be928"
+
+commander@~2.7.1:
+  version "2.7.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.7.1.tgz#5d419a2bbed2c32ee3e4dca9bb45ab83ecc3065a"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commonjs-require-definition@~0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/commonjs-require-definition/-/commonjs-require-definition-0.2.2.tgz#ef841b0756eae3557986e374f62e2345209f7bf7"
+
+component-builder@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/component-builder/-/component-builder-0.10.1.tgz#d29c7ab70241a678e3f8dbea4d1759c68b6f8f9b"
+  dependencies:
+    batch "0.2.1"
+    component-require "0.3.1"
+    cp "~0.1.0"
+    debug "*"
+    mkdirp "0.3.4"
+    string-to-js "0.0.1"
+
+component-require@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/component-require/-/component-require-0.3.1.tgz#50a00e2e2cb0fe273ab4268fe20ae4804f35fe6d"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+connect-slashes@~0.0.9:
+  version "0.0.11"
+  resolved "https://registry.yarnpkg.com/connect-slashes/-/connect-slashes-0.0.11.tgz#4b44efae7599cc03ee20b24e9287272f41d62258"
+
+connect@2.8.8:
+  version "2.8.8"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-2.8.8.tgz#b9abf8caf0bd9773cb3dea29344119872582446d"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    bytes "0.2.0"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    formidable "1.0.14"
+    fresh "0.2.0"
+    methods "0.0.1"
+    pause "0.0.1"
+    qs "0.6.5"
+    send "0.1.4"
+    uid2 "0.0.2"
+
+content-type-parser@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/content-type-parser/-/content-type-parser-1.0.1.tgz#c3e56988c53c65127fb46d4032a3a900246fdc94"
+
+cookie-signature@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.1.tgz#44e072148af01e6e8e24afbf12690d68ae698ecb"
+
+cookie@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.1.0.tgz#90eb469ddce905c866de687efc43131d8801f9d0"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cp@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/cp/-/cp-0.1.1.tgz#3946a76c1a53ffe0e68593f341c124b336c1f06d"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+css-brunch@^1.7.0:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/css-brunch/-/css-brunch-1.7.0.tgz#258c1b038a970840af0c6a8e5582f614050294da"
+
+cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.2.tgz#b8036170c79f07a90ff2f16e22284027a243848b"
+
+"cssstyle@>= 0.2.37 < 0.3.0":
+  version "0.2.37"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54"
+  dependencies:
+    cssom "0.3.x"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-utils@~1.2.17:
+  version "1.2.21"
+  resolved "https://registry.yarnpkg.com/date-utils/-/date-utils-1.2.21.tgz#61fb16cdc1274b3c9acaaffe9fc69df8720a2b64"
+
+debug@*, debug@~2.1.1:
+  version "2.1.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.1.3.tgz#ce8ab1b5ee8fbee2bfa3b633cab93d366b63418e"
+  dependencies:
+    ms "0.7.0"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+deep-is@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+deppack@~0.1.0:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/deppack/-/deppack-0.1.5.tgz#7332ca30af2d1249af6ead80f3ae67323251f974"
+  dependencies:
+    async-each "~0.1.6"
+    browser-resolve "~1.7.2"
+    commonjs-require-definition "~0.2.0"
+    detective "^4.0.0"
+
+detective@^4.0.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+ember-precompile-brunch@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-precompile-brunch/-/ember-precompile-brunch-0.1.2.tgz#54e73d5054a35afde79aa1c1b454f6ed0e37611c"
+  dependencies:
+    coffee-script "1.4.0"
+    jsdom "^9.0.0"
+
+errno@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d"
+  dependencies:
+    prr "~0.0.0"
+
+escodegen@^1.6.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018"
+  dependencies:
+    esprima "^2.7.1"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.8.1"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+esprima@^2.6.0, esprima@^2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+estraverse@^1.9.1:
+  version "1.9.3"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44"
+
+esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@~3.3.0:
+  version "3.3.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-3.3.8.tgz#8e98ac30d81f4c95b85d71d2af6cf84f62ef19bd"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    commander "1.2.0"
+    connect "2.8.8"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    fresh "0.2.0"
+    methods "0.0.1"
+    mkdirp "0.3.5"
+    range-parser "0.0.4"
+    send "0.1.4"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-levenshtein@~2.0.4:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+
+fcache@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/fcache/-/fcache-0.1.1.tgz#2862109b92fe0f68b3d11bc0b06632475033d39c"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+fn-args@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fn-args/-/fn-args-1.0.0.tgz#974dafa1aeac4ac7c21fa09cc3b80f650106ed32"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+formidable@1.0.14:
+  version "1.0.14"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.14.tgz#2b3f4c411cbb5fdd695c44843e2a23514a43231a"
+
+fresh@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.2.0.tgz#bfd9402cf3df12c4a4c310c79f99a3dde13d34a7"
+
+fs-mode@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fs-mode/-/fs-mode-1.0.1.tgz#73102f40aa1a25221dda0eaa906616d6da08255a"
+  dependencies:
+    cbify "^1.0.0"
+
+fsevents@^0.3.8:
+  version "0.3.8"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-0.3.8.tgz#9992f1032c925c829554d0d59801dca0313a5356"
+  dependencies:
+    nan "^2.0.2"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^1.0.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-1.3.0.tgz#971edd816ed5db58705b58079647a64d0aef7968"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob@^4.4.2:
+  version "4.5.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+graceful-fs@^4.1.2, graceful-fs@~4.1.2:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.8.1.tgz#4b2dec8d907e93db336624dcec0183502f8c9428"
+
+handlebars@~1.0.9:
+  version "1.0.12"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-1.0.12.tgz#18c6d3440c35e91b19b3ff582b9151ab4985d4fc"
+  dependencies:
+    optimist "~0.3"
+    uglify-js "~2.3"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+html-encoding-sniffer@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.1.tgz#79bf7a785ea495fe66165e734153f363ff5437da"
+  dependencies:
+    whatwg-encoding "^1.0.1"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+image-size@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.1.tgz#28eea8548a4b1443480ddddc1e083ae54652439f"
+
+inflection@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.2.7.tgz#59db4505310a746677182ed46e155e003bfb3591"
+
+inflight@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+init-skeleton@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/init-skeleton/-/init-skeleton-0.4.4.tgz#1b65f8470bb4924f19b9f81c0e83ff8e684352dd"
+  dependencies:
+    mkdirp "~0.5.0"
+    ncp "~2.0.0"
+    rimraf "~2.3.2"
+
+is-binary-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
+  dependencies:
+    binary-extensions "^1.0.0"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-glob@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-1.1.3.tgz#b4c64b8303d39114492a460d364ccfb0d3c0a045"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+javascript-brunch@^1.7.1:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/javascript-brunch/-/javascript-brunch-1.8.0.tgz#9d01b681f9e4f66bbe51cc1dbb70325ef3d831a5"
+  dependencies:
+    esprima "^2.6.0"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsdom@^9.0.0:
+  version "9.12.0"
+  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-9.12.0.tgz#e8c546fffcb06c00d4833ca84410fed7f8a097d4"
+  dependencies:
+    abab "^1.0.3"
+    acorn "^4.0.4"
+    acorn-globals "^3.1.0"
+    array-equal "^1.0.0"
+    content-type-parser "^1.0.1"
+    cssom ">= 0.3.2 < 0.4.0"
+    cssstyle ">= 0.2.37 < 0.3.0"
+    escodegen "^1.6.1"
+    html-encoding-sniffer "^1.0.1"
+    nwmatcher ">= 1.3.9 < 2.0.0"
+    parse5 "^1.5.1"
+    request "^2.79.0"
+    sax "^1.2.1"
+    symbol-tree "^3.2.1"
+    tough-cookie "^2.3.2"
+    webidl-conversions "^4.0.0"
+    whatwg-encoding "^1.0.1"
+    whatwg-url "^4.3.0"
+    xml-name-validator "^2.0.1"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+keypress@0.1.x:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/keypress/-/keypress-0.1.0.tgz#4a3188d4291b66b4f65edb99f806aa9ae293592a"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+less-brunch@^1.7.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/less-brunch/-/less-brunch-1.8.1.tgz#5e9a4ad00644ef6687d69e7f0578c5c3e56f7adc"
+  dependencies:
+    less "^2.2.0"
+    progeny "^0.5.0"
+
+less@^2.2.0:
+  version "2.7.2"
+  resolved "https://registry.yarnpkg.com/less/-/less-2.7.2.tgz#368d6cc73e1fb03981183280918743c5dcf9b3df"
+  optionalDependencies:
+    errno "^0.1.1"
+    graceful-fs "^4.1.2"
+    image-size "~0.5.0"
+    mime "^1.2.11"
+    mkdirp "^0.5.0"
+    promise "^7.1.1"
+    request "^2.72.0"
+    source-map "^0.5.3"
+
+levn@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee"
+  dependencies:
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+
+loggy@~0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/loggy/-/loggy-0.2.2.tgz#7edc85706a82d761ce9cef868f1afbad84165427"
+  dependencies:
+    ansi-color "~0.2.1"
+    date-utils "~1.2.17"
+    growl "~1.8.1"
+
+lru-cache@2:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+methods@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-0.0.1.tgz#277c90f8bef39709645a8371c51c3b6c648e068c"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.12, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+mime@~1.2.9:
+  version "1.2.11"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10"
+
+minimatch@^2.0.1:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@~0.2.12:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimist@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+mkdirp@0.3.4:
+  version "0.3.4"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.4.tgz#f8c81d213b7299a031f193a57d752a17d2f6c7d8"
+
+mkdirp@0.3.5, mkdirp@~0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@^0.5.0, mkdirp@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+ms@0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.0.tgz#865be94c2e7397ad8a57da6a633a6e2f30798b83"
+
+nan@^2.0.2:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+ncp@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ncp/-/ncp-2.0.0.tgz#195a21d6c46e361d2fb1281ba38b91e9df7bdbb3"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+"nwmatcher@>= 1.3.9 < 2.0.0":
+  version "1.3.9"
+  resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.3.9.tgz#8bab486ff7fa3dfd086656bbe8b17116d3692d2a"
+
+oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+once@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+optionator@^0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64"
+  dependencies:
+    deep-is "~0.1.3"
+    fast-levenshtein "~2.0.4"
+    levn "~0.3.0"
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+    wordwrap "~1.0.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse5@^1.5.1:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/parse5/-/parse5-1.5.1.tgz#9b7f3b0de32be78dc2401b17573ccaf0f6f59d94"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+pause@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/pause/-/pause-0.0.1.tgz#1d408b3fdb76923b9543d96fb4c9dfd535d9cb5d"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+prelude-ls@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+progeny@^0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/progeny/-/progeny-0.5.2.tgz#1fef1cccba6b057f344e780dbf7bdc8c5135121b"
+  dependencies:
+    async-each "~0.1.4"
+    fs-mode "^1.0.1"
+
+promise@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/promise/-/promise-7.1.1.tgz#489654c692616b8aa55b0724fa809bb7db49c5bf"
+  dependencies:
+    asap "~2.0.3"
+
+prr@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+pushserve@~0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/pushserve/-/pushserve-0.1.6.tgz#a07b173fc2488b71d9af4a5b37411bb3d91d6e27"
+  dependencies:
+    commander "~2.0.0"
+    connect-slashes "~0.0.9"
+    express "~3.3.0"
+
+qs@0.6.5:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.6.5.tgz#294b268e4b0d4250f6dde19b3b8b34935dff14ef"
+
+qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+quickly-copy-file@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/quickly-copy-file/-/quickly-copy-file-0.1.0.tgz#d14063ff659f68697e7cbfee817a07bb635237c2"
+  dependencies:
+    mkdirp "~0.5.0"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-0.0.4.tgz#c0427ffef51c10acba0782a46c9602e744ff620b"
+
+read-components@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/read-components/-/read-components-0.6.1.tgz#45752f1c7c7e450742f4085fe6e24fccc5c75720"
+  dependencies:
+    async-each "~0.1.3"
+    component-builder "~0.10.0"
+
+readable-stream@~1.0.26-2:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdirp@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-1.4.0.tgz#c5de6fcb3dec80523c1c70113f1a190d8af82c89"
+  dependencies:
+    graceful-fs "~4.1.2"
+    minimatch "~0.2.12"
+    readable-stream "~1.0.26-2"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+request@^2.72.0, request@^2.79.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+resolve@1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.5.tgz#3b74c0c44cdf5eee32322b2cda0a4acbf6970fa7"
+
+rimraf@~2.3.2:
+  version "2.3.4"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.3.4.tgz#82d9bc1b2fcf31e205ac7b28138a025d08e9159a"
+  dependencies:
+    glob "^4.4.2"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sax@^1.2.1:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.2.tgz#fd8631a23bc7826bef5d871bdb87378c95647828"
+
+scaffolt@^0.4.3:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/scaffolt/-/scaffolt-0.4.3.tgz#eafa6eb31b733b79435cd7e24053de49cd2e145e"
+  dependencies:
+    async-each "~0.1.1"
+    commander "~1.2.0"
+    handlebars "~1.0.9"
+    inflection "~1.2.5"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+
+send@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.1.4.tgz#be70d8d1be01de61821af13780b50345a4f71abd"
+  dependencies:
+    debug "*"
+    fresh "0.2.0"
+    mime "~1.2.9"
+    range-parser "0.0.4"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+source-map@0.1.34, source-map@~0.1.7:
+  version "0.1.34"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.4.x:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.3:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.3.0.tgz#8586fb9a5a005e5b501e21cd18b6f21b457ad1f9"
+  dependencies:
+    amdefine ">=0.0.4"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+string-to-js@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/string-to-js/-/string-to-js-0.0.1.tgz#bf153c760636faa30769b804a0195552ba7ad80f"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+symbol-tree@^3.2.1:
+  version "3.2.2"
+  resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6"
+
+tough-cookie@^2.3.2, tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tr46@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-check@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72"
+  dependencies:
+    prelude-ls "~1.1.2"
+
+uglify-js-brunch@^1.7.7:
+  version "1.7.8"
+  resolved "https://registry.yarnpkg.com/uglify-js-brunch/-/uglify-js-brunch-1.7.8.tgz#b36dffbcd19cfea27248d34797effef1ae9d3a62"
+  dependencies:
+    uglify-js "~2.4.7"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-js@~2.4.7:
+  version "2.4.24"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.4.24.tgz#fad5755c1e1577658bb06ff9ab6e548c95bebd6e"
+  dependencies:
+    async "~0.2.6"
+    source-map "0.1.34"
+    uglify-to-browserify "~1.0.0"
+    yargs "~3.5.4"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid2@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/uid2/-/uid2-0.0.2.tgz#107fb155c82c1136620797ed4c88cf2b08f6aab8"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+webidl-conversions@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
+
+webidl-conversions@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.1.tgz#8015a17ab83e7e1b311638486ace81da6ce206a0"
+
+whatwg-encoding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.1.tgz#3c6c451a198ee7aec55b1ec61d0920c67801a5f4"
+  dependencies:
+    iconv-lite "0.4.13"
+
+whatwg-url@^4.3.0:
+  version "4.7.1"
+  resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-4.7.1.tgz#df4dc2e3f25a63b1fa5b32ed6d6c139577d690de"
+  dependencies:
+    tr46 "~0.0.3"
+    webidl-conversions "^3.0.0"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+wordwrap@0.0.2, wordwrap@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
+
+wrappy@1, wrappy@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+xml-name-validator@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-2.0.1.tgz#4d8b8f1eccd3419aa362061becef515e1e559635"
+
+yargs@~3.5.4:
+  version "3.5.4"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.5.4.tgz#d8aff8f665e94c34bd259bdebd1bfaf0ddd35361"
+  dependencies:
+    camelcase "^1.0.2"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+    wordwrap "0.0.2"
diff --git a/contrib/views/files/pom.xml b/contrib/views/files/pom.xml
index 478a537..f7adbb5 100644
--- a/contrib/views/files/pom.xml
+++ b/contrib/views/files/pom.xml
@@ -179,29 +179,35 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>src/main/resources/ui/</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- Setting the npm_config_tmp environment variable is a workaround for
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install --pure-lockfile</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
               <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm</arguments>
+              <arguments>--ignore-engines</arguments>
             </configuration>
           </execution>
         </executions>
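One caveat about the yarn execution above: frontend-maven-plugin binds <arguments> to a single string parameter, so the two <arguments> elements inside one <configuration> will not both take effect. A minimal sketch of a merged form, assuming the intent was to keep the install command together with --ignore-engines (the exact flag set is an assumption, not taken from this commit):

            <configuration>
              <!-- assumed merged form: one arguments string for the yarn goal -->
              <arguments>install --ignore-engines</arguments>
            </configuration>

Both --ignore-engines and the --pure-lockfile flag that appears in the execution id are standard yarn (classic) install options; whether --pure-lockfile was also meant to be passed as an argument is left open here.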
diff --git a/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java b/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
index 10b7c9e..1334c06 100644
--- a/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
+++ b/contrib/views/files/src/main/java/org/apache/ambari/view/filebrowser/DownloadService.java
@@ -112,7 +112,7 @@
       ResponseBuilder result = Response.ok(fs);
       if (download) {
         result.header("Content-Disposition",
-          "inline; filename=\"" + status.getPath().getName() + "\"").type(MediaType.APPLICATION_OCTET_STREAM);
+          "attachment; filename=\"" + status.getPath().getName() + "\"").type(MediaType.APPLICATION_OCTET_STREAM);
       } else {
         FileNameMap fileNameMap = URLConnection.getFileNameMap();
         String mimeType = fileNameMap.getContentTypeFor(status.getPath().getName());
@@ -278,7 +278,7 @@
       };
       ResponseBuilder response = Response.ok(result);
       if (request.download) {
-        response.header("Content-Disposition", "inline; filename=\"concatResult.txt\"").type(MediaType.APPLICATION_OCTET_STREAM);
+        response.header("Content-Disposition", "attachment; filename=\"concatResult.txt\"").type(MediaType.APPLICATION_OCTET_STREAM);
       } else {
         response.header("Content-Disposition", "filename=\"concatResult.txt\"").type(MediaType.TEXT_PLAIN);
       }
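The two hunks above change Content-Disposition from "inline" to "attachment" on the explicit-download paths. A minimal JAX-RS sketch of the distinction (class and method names are illustrative, not Ambari's):

    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    public class DispositionSketch {
      // "attachment" forces a save-as prompt; "inline" lets the browser
      // try to render the body in-page, which is what the old code did
      // even when the client had explicitly asked for a download.
      static Response file(byte[] body, String name, boolean download) {
        String disposition = (download ? "attachment" : "inline")
            + "; filename=\"" + name + "\"";
        return Response.ok(body)
            .header("Content-Disposition", disposition)
            .type(MediaType.APPLICATION_OCTET_STREAM)
            .build();
      }
    }

With "attachment", browsers download the stream regardless of MIME type, which matches the intent of the download=true branch in DownloadService.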
diff --git a/contrib/views/files/src/main/resources/ui/package.json b/contrib/views/files/src/main/resources/ui/package.json
index 3f27897..0089421 100644
--- a/contrib/views/files/src/main/resources/ui/package.json
+++ b/contrib/views/files/src/main/resources/ui/package.json
@@ -11,7 +11,7 @@
     "build": "ember build",
     "start": "ember server",
     "test": "ember test",
-    "preinstall": "chmod +x node/node_modules/npm/bin/node-gyp-bin/node-gyp",
+    "preinstall": "",
     "postinstall": "node node_modules/.bin/bower --allow-root install"
   },
   "repository": "",
diff --git a/contrib/views/files/src/main/resources/ui/yarn.lock b/contrib/views/files/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..9974e3b
--- /dev/null
+++ b/contrib/views/files/src/main/resources/ui/yarn.lock
@@ -0,0 +1,6041 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1, abbrev@^1.0.5:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.2.tgz#7bee4e112aabeecc2e14429c4ca750c55d8e5ecd"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-escapes@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-1.4.0.tgz#d3a8a83b319aa67793662b13e761c7911422306e"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@1.0.0, archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz#a2d28c93102aa6cc96245a26cb954de06ec53f0c"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-filter@~0.0.0:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-map@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662"
+
+array-reduce@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0, asap@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-foreach@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@0.9.0, async@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.0.tgz#ac3613b1da9bed1b47510bb4651b8931e47146c7"
+
+async@^0.2.8, async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@^1.4.0, async@^1.4.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-feature-flags@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-feature-flags/-/babel-plugin-feature-flags-0.2.3.tgz#81d81ed77bda2014098fa8243abcf03a551cbd4d"
+  dependencies:
+    json-stable-stringify "^1.0.1"
+
+babel-plugin-filter-imports@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-0.2.1.tgz#784f96a892f2f7ed2ccf0955688bd8916cd2e212"
+  dependencies:
+    json-stable-stringify "^1.0.1"
+
+babel-plugin-htmlbars-inline-precompile@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.1.0.tgz#b784723bd1f108796b56faf9f1c05eb5ca442983"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/binary/-/binary-0.3.0.tgz#9f60553bc5ce8c3386f3b553cff47462adecaa79"
+  dependencies:
+    buffers "~0.1.1"
+    chainsaw "~0.1.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@^1.0.0, bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+bl@~0.9.0:
+  version "0.9.5"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-0.9.5.tgz#c06b797af085ea00bc527afc8efcf11de2232054"
+  dependencies:
+    readable-stream "~1.0.26"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.1.1, bluebird@^3.4.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+bower-config@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-1.4.0.tgz#16c38c1135f8071c19f25938d61b0d8cbf18d3f1"
+  dependencies:
+    graceful-fs "^4.1.3"
+    mout "^1.0.0"
+    optimist "^0.6.1"
+    osenv "^0.1.3"
+    untildify "^2.1.0"
+
+bower-endpoint-parser@0.2.2, bower-endpoint-parser@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-json@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/bower-json/-/bower-json-0.4.0.tgz#a99c3ccf416ef0590ed0ded252c760f1c6d93766"
+  dependencies:
+    deep-extend "~0.2.5"
+    graceful-fs "~2.0.0"
+    intersect "~0.0.3"
+
+bower-logger@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-logger/-/bower-logger-0.2.2.tgz#39be07e979b2fc8e03a94634205ed9422373d381"
+
+bower-registry-client@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/bower-registry-client/-/bower-registry-client-1.0.0.tgz#697c3499067549a106b49f26d03e6dd1017a9241"
+  dependencies:
+    async "^0.2.8"
+    graceful-fs "^4.0.0"
+    lru-cache "^2.3.0"
+    mkdirp "^0.3.5"
+    request "^2.51.0"
+    request-replay "^0.2.0"
+    rimraf "^2.2.0"
+
+bower@1.7.2, bower@^1.3.12:
+  version "1.7.2"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.7.2.tgz#b04228f9970f11777017e64ae39d71f9346c9575"
+  dependencies:
+    abbrev "^1.0.5"
+    archy "1.0.0"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "^0.2.2"
+    bower-json "^0.4.0"
+    bower-logger "^0.2.2"
+    bower-registry-client "^1.0.0"
+    cardinal "0.4.4"
+    chalk "^1.0.0"
+    chmodr "^1.0.2"
+    configstore "^0.3.2"
+    decompress-zip "^0.1.0"
+    destroy "^1.0.3"
+    fs-write-stream-atomic "1.0.5"
+    fstream "^1.0.3"
+    fstream-ignore "^1.0.2"
+    github "^0.2.3"
+    glob "^4.3.2"
+    graceful-fs "^3.0.5"
+    handlebars "^2.0.0"
+    inquirer "0.10.0"
+    insight "^0.7.0"
+    is-root "^1.0.0"
+    junk "^1.0.0"
+    lockfile "^1.0.0"
+    lru-cache "^2.5.0"
+    md5-hex "^1.0.2"
+    mkdirp "0.5.0"
+    mout "^0.11.0"
+    nopt "^3.0.1"
+    opn "^1.0.1"
+    p-throttler "0.1.1"
+    promptly "0.2.0"
+    q "^1.1.2"
+    request "2.53.0"
+    request-progress "0.3.1"
+    retry "0.6.1"
+    rimraf "^2.2.8"
+    semver "^2.3.0"
+    semver-utils "^1.1.1"
+    shell-quote "^1.4.2"
+    stringify-object "^1.0.0"
+    tar-fs "^1.4.1"
+    tmp "0.0.24"
+    update-notifier "^0.6.0"
+    user-home "^1.1.0"
+    which "^1.0.8"
+
+boxen@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/boxen/-/boxen-0.3.1.tgz#a7d898243ae622f7abb6bb604d740a76c6a5461b"
+  dependencies:
+    chalk "^1.1.1"
+    filled-array "^1.0.0"
+    object-assign "^4.0.1"
+    repeating "^2.0.0"
+    string-width "^1.0.1"
+    widest-line "^1.0.0"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.2.0:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0, broccoli-babel-transpiler@^5.6.0, broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-caching-writer@^3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-3.0.3.tgz#0bd2c96a9738d6a6ab590f07ba35c5157d7db476"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.1"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.3.0"
+
+broccoli-clean-css@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-0.2.0.tgz#15f1c265a6986585a972bfb070bf52e9c054c861"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    clean-css "^2.2.1"
+
+broccoli-concat@^2.0.4, broccoli-concat@^2.2.0:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-2.3.8.tgz#590cdcc021bb905b6c121d87c2d1d57df44a2a48"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-stew "^1.3.3"
+    fast-sourcemap-concat "^1.0.1"
+    fs-extra "^0.30.0"
+    lodash.merge "^4.3.0"
+    lodash.omit "^4.1.0"
+    lodash.uniq "^4.2.0"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-file-creator@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-file-creator/-/broccoli-file-creator-1.1.1.tgz#1b35b67d215abdfadd8d49eeb69493c39e6c3450"
+  dependencies:
+    broccoli-kitchen-sink-helpers "~0.2.0"
+    broccoli-plugin "^1.1.0"
+    broccoli-writer "~0.1.1"
+    mkdirp "^0.5.1"
+    rsvp "~3.0.6"
+    symlink-or-copy "^1.0.1"
+
+broccoli-filter@^0.1.6:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel-reducer@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel-reducer/-/broccoli-funnel-reducer-1.0.0.tgz#11365b2a785aec9b17972a36df87eef24c5cc0ea"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6, broccoli-kitchen-sink-helpers@~0.2.0:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-less-single@^0.6.0:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/broccoli-less-single/-/broccoli-less-single-0.6.4.tgz#200316f4146b8cf7e6ab97fc661b8085cc89bdb9"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    include-path-searcher "^0.1.0"
+    less "^2.5.0"
+    lodash.merge "^3.3.2"
+    mkdirp "^0.5.0"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-sass-source-maps@^1.8.0:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/broccoli-sass-source-maps/-/broccoli-sass-source-maps-1.8.1.tgz#115e32be25dc5f1686af1c8d1fa4c4c62749f0b6"
+  dependencies:
+    broccoli-caching-writer "^3.0.3"
+    include-path-searcher "^0.1.0"
+    mkdirp "^0.3.5"
+    node-sass "^3.8.0"
+    object-assign "^2.0.0"
+    rsvp "^3.0.6"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sri-hash@^2.1.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-2.1.2.tgz#bc69905ed7a381ad325cc0d02ded071328ebf3f3"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.3.3:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@^0.1.1, broccoli-writer@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+broccoli@0.16.9:
+  version "0.16.9"
+  resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-0.16.9.tgz#b87ca679f09005c576901a9bc19f5df77efd55a4"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^4.0.4"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+buffers@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/buffers/-/buffers-0.1.1.tgz#b24579c3bed4d6d396aeee6d9a8ae7f5482ab7bb"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+camelcase@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+capture-stack-trace@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d"
+
+cardinal@0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.4.4.tgz#ca5bb68a5b511b90fe93b9acea49bdee5c32bfe2"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.4.0"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+caseless@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.9.0.tgz#b7b65ce6bf1413886539cfd533f0b30effa9cf88"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chainsaw@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/chainsaw/-/chainsaw-0.1.0.tgz#5eab50b28afe58074d0d58291388828b5e5fbc98"
+  dependencies:
+    traverse ">=0.3.0 <0.4"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@^1.0.2, chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css@^2.2.1:
+  version "2.2.23"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.2.23.tgz#0590b5478b516c4903edc2d89bd3fdbdd286328c"
+  dependencies:
+    commander "2.2.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-cursor@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-1.0.2.tgz#64da3f7d56a54412e59794bd62dc35295e8f2987"
+  dependencies:
+    restore-cursor "^1.0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli-width@^1.0.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-1.1.1.tgz#a4d293ef67ebb7b88d4a4d42c0ccf00c4d1e366d"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+clipboard@^1.5.10:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/clipboard/-/clipboard-1.6.1.tgz#65c5b654812466b0faab82dc6ba0f1d2f8e4be53"
+  dependencies:
+    good-listener "^1.2.0"
+    select "^1.1.2"
+    tiny-emitter "^1.0.0"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+cliui@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wrap-ansi "^2.0.0"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.1:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.2:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4, combined-stream@~0.0.5:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@2.2.x:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.2.0.tgz#175ad4b9317f3ff615f201c1e57224f55a3e91df"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@^1.4.6:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+config-chain@~1.1.9:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@^0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.3.2.tgz#25e4c16c3768abf75c5a65bc61761f495055b459"
+  dependencies:
+    graceful-fs "^3.0.1"
+    js-yaml "^3.1.0"
+    mkdirp "^0.5.0"
+    object-assign "^2.0.0"
+    osenv "^0.1.0"
+    user-home "^1.0.0"
+    uuid "^2.0.1"
+    xdg-basedir "^1.0.0"
+
+configstore@^1.0.0, configstore@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.4.0.tgz#c35781d0501d268c25c54b8b17f6240e8a4fb021"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+configstore@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-2.1.0.tgz#737a3a7036e9886102aa6099e47bb33ab1aba1a1"
+  dependencies:
+    dot-prop "^3.0.0"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.14.5.tgz#5a25047bc76f73072667c8cb52c989888f494c63"
+  dependencies:
+    bluebird "^3.1.1"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+create-error-class@^3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6"
+  dependencies:
+    capture-stack-trace "^1.0.0"
+
+cross-spawn@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982"
+  dependencies:
+    lru-cache "^4.0.1"
+    which "^1.2.9"
+
+cross-spawn@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449"
+  dependencies:
+    lru-cache "^4.0.1"
+    shebang-command "^1.2.0"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@2.2.0, debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+decompress-zip@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/decompress-zip/-/decompress-zip-0.1.0.tgz#bce60c11664f2d660fca4bcf634af6de5d6c14c7"
+  dependencies:
+    binary "^0.3.0"
+    graceful-fs "^3.0.0"
+    mkpath "^0.1.0"
+    nopt "^3.0.1"
+    q "^1.1.2"
+    readable-stream "^1.1.8"
+    touch "0.0.3"
+
+deep-extend@~0.2.5:
+  version "0.2.11"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.2.11.tgz#7a16ba69729132340506170494bc83f7076fe08f"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegate@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/delegate/-/delegate-3.1.2.tgz#1e1bc6f5cadda6cb6cbf7e6d05d0bcdd5712aebe"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@^1.0.3, destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+dot-prop@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-3.0.0.tgz#1b708af094a49c9a0e7dbcad790aba539dac1177"
+  dependencies:
+    is-obj "^1.0.0"
+
+duplexer2@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/duplexer2/-/duplexer2-0.1.4.tgz#8b12dab878c0d69e3e7891051662a32fc6bddcc1"
+  dependencies:
+    readable-stream "^2.0.2"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-ajax@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ember-ajax/-/ember-ajax-0.7.1.tgz#0b3d1eeb99ed9d9251c013cc6ab6a1e7d4d14507"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-cli-app-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.1.tgz#d135eba75f30e791d8a5e5844f1251dcbcc40438"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@5.1.10:
+  version "5.1.10"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.1.10.tgz#d403f178aab602e1337c403c5a58c0200a8969aa"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.0"
+    broccoli-funnel "^1.0.0"
+    clone "^1.0.2"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7, ember-cli-babel@^5.2.4:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-clipboard@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-clipboard/-/ember-cli-clipboard-0.3.2.tgz#4f9af67d2836ac03fc9e686337f371880ae1b0b9"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    clipboard "^1.5.10"
+    ember-cli-babel "^5.1.5"
+    ember-cli-htmlbars "0.7.9"
+
+ember-cli-copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-copy-dereference/-/ember-cli-copy-dereference-1.0.0.tgz#a1795bf6c70650317df4ab8674dd02e0bea5d4fd"
+
+ember-cli-dependency-checker@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.4.0.tgz#2b13f977e1eea843fc1a21a001be6ca5d4ef1942"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "^0.2.0"
+    semver "^4.1.0"
+
+ember-cli-file-picker@0.0.9:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-file-picker/-/ember-cli-file-picker-0.0.9.tgz#3aac5b924e963e39841b4508085c15eeac77d8c9"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-cli-flash@1.3.8:
+  version "1.3.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-flash/-/ember-cli-flash-1.3.8.tgz#3cc01c6c6ffc663d634e2f0d8468d4c40310c2bf"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "0.7.9"
+    ember-new-computed "^1.0.3"
+
+ember-cli-font-awesome@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-font-awesome/-/ember-cli-font-awesome-1.4.0.tgz#496e6f71ce07858ab106bc60a5b1a2aaebe8f6d5"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    ember-cli-babel "^5.1.5"
+    ember-cli-sass "^5.0.1"
+    ember-computed-decorators "0.2.2"
+
+ember-cli-get-dependency-depth@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-dependency-depth/-/ember-cli-get-dependency-depth-1.0.0.tgz#e0afecf82a2d52f00f28ab468295281aec368d11"
+
+ember-cli-htmlbars-inline-precompile@^0.3.1:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.6.tgz#4095fe423f93102724c0725e4dd1a31f25e24de5"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "^0.1.0"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    hash-for-dep "^1.0.2"
+
+ember-cli-htmlbars@0.7.9:
+  version "0.7.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.9.tgz#142cd4325ab3f48c76cf8dc4d3a3800f38e721be"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
+ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.3.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-less@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/ember-cli-less/-/ember-cli-less-1.5.3.tgz#f48d8c967a92b9ea21aafdcf9ece0db4839639d3"
+  dependencies:
+    broccoli-less-single "^0.6.0"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    lodash.merge "^3.3.2"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^1.0.3:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-1.1.0.tgz#1a8f848876de2851507842e4c0c9051f62b4aac6"
+  dependencies:
+    broccoli-clean-css "0.2.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@^1.1.0:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.4.2.tgz#7ca25495c70ca347106d44fc00f0d7aeca027475"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-concat "^2.2.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    ember-cli-babel "^5.1.5"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sass@^5.0.1:
+  version "5.6.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-sass/-/ember-cli-sass-5.6.0.tgz#792de67544bb903eef421a3e59c484840fea5352"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    broccoli-sass-source-maps "^1.8.0"
+    ember-cli-babel "5.1.10"
+    ember-cli-version-checker "^1.0.2"
+    merge "^1.2.0"
+
+ember-cli-sri@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-2.1.1.tgz#971620934a4b9183cf7923cc03e178b83aa907fd"
+  dependencies:
+    broccoli-sri-hash "^2.1.0"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4, ember-cli-version-checker@^1.1.6:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@2.2.0-beta.4:
+  version "2.2.0-beta.4"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-2.2.0-beta.4.tgz#c8e281f2ffac550e690eef71bc2cd00f6a27140a"
+  dependencies:
+    amd-name-resolver "0.0.2"
+    bower "^1.3.12"
+    bower-config "0.6.1"
+    bower-endpoint-parser "0.2.2"
+    broccoli "0.16.9"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-concat "^2.0.4"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-funnel-reducer "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-viz "^2.0.1"
+    chalk "^1.1.1"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "^1.4.0"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-copy-dereference "^1.0.0"
+    ember-cli-get-dependency-depth "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^1.0.3"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.26.2"
+    fs-monitor-stack "^1.0.2"
+    fs-tree-diff "^0.4.4"
+    get-caller-file "^1.0.0"
+    git-repo-info "^1.0.4"
+    glob "5.0.13"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.21"
+    lodash "^3.6.0"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.3"
+    merge-defaults "^0.2.1"
+    minimatch "^3.0.0"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.10"
+    pleasant-progress "^1.0.2"
+    portfinder "^0.4.0"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.5"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rimraf "^2.4.4"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.3"
+    testem "^1.0.0-rc.4"
+    through "^2.3.6"
+    tiny-lr "0.2.1"
+    walk-sync "^0.2.6"
+    yam "0.0.18"
+
+"ember-collection@git://github.com/emberjs/ember-collection.git#bf752508a501161791e3f3b9a546c9b97d5c387a":
+  version "1.0.0-alpha.4"
+  resolved "git://github.com/emberjs/ember-collection.git#bf752508a501161791e3f3b9a546c9b97d5c387a"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "0.7.9"
+    layout-bin-packer "^1.2.0"
+
+ember-computed-decorators@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/ember-computed-decorators/-/ember-computed-decorators-0.2.2.tgz#7c934a575c55ac3a18b6aaeb7cd2cbe149bc9b34"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-data@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.3.0.tgz#9ca8b97a32725efb8feeecdc9e14eaac1188b58e"
+  dependencies:
+    babel-plugin-feature-flags "^0.2.0"
+    babel-plugin-filter-imports "^0.2.0"
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-file-creator "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.1.3"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    ember-inflector "^1.9.4"
+    inflection "^1.8.0"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+
+ember-disable-proxy-controllers@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-disable-proxy-controllers/-/ember-disable-proxy-controllers-1.0.1.tgz#1254eeec0ba025c24eb9e8da611afa7b38754281"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-export-application-global@^1.0.4:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-inflector@^1.9.4:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-1.12.1.tgz#d8bd2ca2f327b439720f89923fe614d46b5da1ca"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+
+ember-keyboard@0.2.5:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/ember-keyboard/-/ember-keyboard-0.2.5.tgz#8946846feecdf35dc624d737087d7c346a85bc7f"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+
+ember-new-computed@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/ember-new-computed/-/ember-new-computed-1.0.3.tgz#592af8a778e0260ce7e812687c3aedface1622bf"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@^2.0.3:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.1.1.tgz#5e4c1fffe9f5f48fc2194ad7592274ed0cd74f72"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+end-of-stream@^1.0.0, end-of-stream@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.0.tgz#7a90d833efda6cfa6eac0f4949dbb0fad3a63206"
+  dependencies:
+    once "^1.4.0"
+
+engine.io-client@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.0.tgz#7b730e4127414087596d9be3c88d2bc5fdb6cf5c"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.1"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.1.tgz#9554f1ae33107d6fbd170ca5466d2f833f6a07cf"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.0.tgz#3eeb5f264cb75dbbec1baaea26d61f5a4eace2aa"
+  dependencies:
+    accepts "1.3.3"
+    base64id "0.1.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    ws "1.1.1"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+errno@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d"
+  dependencies:
+    prr "~0.0.0"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3, escape-string-regexp@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esprima@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.0.4.tgz#9f557e08fc3b4d26ece9dd34f8fbf476b62585ad"
+
+esutils@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit-hook@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/exit-hook/-/exit-hook-1.1.1.tgz#f05ca233b48c05d54fff07765df8507e95c02ff8"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+fast-sourcemap-concat@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-1.1.0.tgz#a800767abed5eda02e67238ec063a709be61f9d4"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    fs-extra "^0.30.0"
+    memory-streams "^0.1.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+figures@^1.3.5:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e"
+  dependencies:
+    escape-string-regexp "^1.0.5"
+    object-assign "^4.1.0"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+filled-array@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/filled-array/-/filled-array-1.1.0.tgz#c3c4f6c663b923459a9aa29912d2d031f1507f84"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.7.0:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.7.1.tgz#ccf20f7941f108883fcddb99383dbe6e1861c758"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash.debounce "^3.1.1"
+    lodash.flatten "^3.0.2"
+    minimatch "^3.0.2"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.2.0.tgz#26f8bc26da6440e299cbdcfb69035c4f77a6e466"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime-types "~2.0.3"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.26.2:
+  version "0.26.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.2.tgz#71b7697e539db037acf41e6e7923e94d605bf498"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@1.0.5, fs-write-stream-atomic@~1.0.4:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.5.tgz#862a4dabdffcafabfc16499458e37310c39925f6"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0, fstream-ignore@^1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@^1.0.3, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.0, gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+gaze@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/gaze/-/gaze-1.1.2.tgz#847224677adb8870d679257ed3388fdb61e40105"
+  dependencies:
+    globule "^1.0.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.0, get-caller-file@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+github@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/github/-/github-0.2.4.tgz#24fa7f0e13fa11b946af91134c51982a91ce538b"
+  dependencies:
+    mime "^1.2.11"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@^4.3.2:
+  version "4.5.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@5.0.13, glob@^5.0.10:
+  version "5.0.13"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.13.tgz#0b6ffc3ac64eb90669f723a00a0ebb7281b33f8f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.0, glob@^7.0.3, glob@^7.0.4, glob@^7.0.5, glob@^7.1.1, glob@~7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globule@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/globule/-/globule-1.1.0.tgz#c49352e4dc183d85893ee825385eb994bb6df45f"
+  dependencies:
+    glob "~7.1.1"
+    lodash "~4.16.4"
+    minimatch "~3.0.2"
+
+good-listener@^1.2.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/good-listener/-/good-listener-1.2.2.tgz#d53b30cdf9313dffb7dc9a0d477096aa6d145c50"
+  dependencies:
+    delegate "^3.1.2"
+
+got@^5.0.0:
+  version "5.7.1"
+  resolved "https://registry.yarnpkg.com/got/-/got-5.7.1.tgz#5f81635a61e4a6589f180569ea4e381680a51f35"
+  dependencies:
+    create-error-class "^3.0.1"
+    duplexer2 "^0.1.4"
+    is-redirect "^1.0.0"
+    is-retry-allowed "^1.0.0"
+    is-stream "^1.0.0"
+    lowercase-keys "^1.0.0"
+    node-status-codes "^1.0.0"
+    object-assign "^4.0.1"
+    parse-json "^2.1.0"
+    pinkie-promise "^2.0.0"
+    read-all-stream "^3.0.0"
+    readable-stream "^2.0.5"
+    timed-out "^3.0.0"
+    unzip-response "^1.0.2"
+    url-parse-lax "^1.0.0"
+
+graceful-fs@^3.0.0, graceful-fs@^3.0.1, graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.0.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growly@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
+
+handlebars@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-2.0.0.tgz#6e9d7f8514a3467fa5e9f82cc158ecfc1d5ac76f"
+  dependencies:
+    optimist "~0.3"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+handlebars@^4.0.4:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hawk@~2.3.0:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-2.3.1.tgz#1e731ce39447fa1d0f6d707f7bceebec0fd1ec1f"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@^2.1.5, hosted-git-info@~2.1.4:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
+  dependencies:
+    inherits "~2.0.1"
+    statuses "1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.11.0.tgz#1796cf67a001ad5cd6849dca0991485f09089fe6"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13, iconv-lite@^0.4.5:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+image-size@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.1.tgz#28eea8548a4b1443480ddddc1e083ae54652439f"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+in-publish@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51"
+
+include-path-searcher@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/include-path-searcher/-/include-path-searcher-0.1.0.tgz#c0cf2ddfa164fb2eae07bc7ca43a7f191cb4d7bd"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0, inflection@^1.8.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@^1.3.4, ini@~1.3.0, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.1:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inquirer@0.10.0, inquirer@^0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.10.0.tgz#48cd3e23f8d989a52d47dc5e10ec75324387e908"
+  dependencies:
+    ansi-escapes "^1.1.0"
+    ansi-regex "^2.0.0"
+    chalk "^1.0.0"
+    cli-cursor "^1.0.1"
+    cli-width "^1.0.1"
+    figures "^1.3.5"
+    lodash "^3.3.1"
+    readline2 "^1.0.1"
+    run-async "^0.1.0"
+    rx-lite "^3.1.2"
+    strip-ansi "^3.0.0"
+    through "^2.3.6"
+
+inquirer@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.5.1.tgz#e9f2cd1ee172c7a32e054b78a03d4ddb0d7707f1"
+  dependencies:
+    async "~0.8.0"
+    chalk "~0.4.0"
+    cli-color "~0.3.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+insight@^0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/insight/-/insight-0.7.0.tgz#061f9189835bd38a97a60c2b76ea0c6b30099ff6"
+  dependencies:
+    async "^1.4.2"
+    chalk "^1.0.0"
+    configstore "^1.0.0"
+    inquirer "^0.10.0"
+    lodash.debounce "^3.0.1"
+    object-assign "^4.0.1"
+    os-name "^1.0.0"
+    request "^2.40.0"
+    tough-cookie "^2.0.0"
+
+intersect@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/intersect/-/intersect-0.0.3.tgz#c1a4a5e5eac6ede4af7504cc07e0ada7bc9f4920"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-git-url@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/is-git-url/-/is-git-url-0.2.3.tgz#445200d6fbd6da028fb5e01440d9afc93f3ccb64"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-npm@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-obj@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-redirect@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24"
+
+is-retry-allowed@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34"
+
+is-root@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-root/-/is-root-1.0.0.tgz#07b6c233bc394cd9d02ba15c966bd6660d6342d5"
+
+is-stream@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^2.0.3:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-2.0.4.tgz#d23592e6a6f093efb84c2e6152056be294e414a1"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.1, isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-base64@^2.1.8:
+  version "2.1.9"
+  resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-yaml@^3.1.0, js-yaml@^3.2.5, js-yaml@^3.2.7:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+jsonfile@^2.0.0, jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+junk@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/junk/-/junk-1.0.3.tgz#87be63488649cbdca6f53ab39bec9ccd2347f592"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klassy@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/klassy/-/klassy-0.1.3.tgz#c31d5756d583197d75f582b6e692872be497067f"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+latest-version@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-2.0.0.tgz#56f8d6139620847b8017f8f1f4d78e211324168b"
+  dependencies:
+    package-json "^2.0.0"
+
+layout-bin-packer@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/layout-bin-packer/-/layout-bin-packer-1.3.0.tgz#6f232f67db7606b2a405f39ae7197f2931a26c0c"
+  dependencies:
+    ember-cli-babel "^5.2.4"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.21:
+  version "0.0.21"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.21.tgz#09804bf70f8aefbba745f5d56d2a4debf22711ff"
+  dependencies:
+    debug "^2.1.0"
+    lodash.assign "^3.2.0"
+    request "^2.27.0"
+    rsvp "^3.0.21"
+
+less@^2.5.0:
+  version "2.7.2"
+  resolved "https://registry.yarnpkg.com/less/-/less-2.7.2.tgz#368d6cc73e1fb03981183280918743c5dcf9b3df"
+  optionalDependencies:
+    errno "^0.1.1"
+    graceful-fs "^4.1.2"
+    image-size "~0.5.0"
+    mime "^1.2.11"
+    mkdirp "^0.5.0"
+    promise "^7.1.1"
+    request "^2.72.0"
+    source-map "^0.5.3"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.2.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.2.4.tgz#0773526c317c8fd13bd534ee1d180ff88abf881a"
+  dependencies:
+    uc.micro "^1.0.1"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+load-json-file@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    parse-json "^2.2.0"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+    strip-bom "^2.0.0"
+
+lockfile@^1.0.0, lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash-node@^2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-2.4.1.tgz#ea82f7b100c733d1a42af76801e506105e2a80ec"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._baseassign@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz#8c38a099500f215ad09e59f1722fd0c52bfe0a4e"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._baseflatten@^3.0.0:
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/lodash._baseflatten/-/lodash._baseflatten-3.1.4.tgz#0770ff80131af6e34f3b511796a7ba5214e65ff7"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-3.2.0.tgz#3ce9f0234b4b2223e296b8fa0ac1fee8ebca64fa"
+  dependencies:
+    lodash._baseassign "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash.assign@^4.2.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7"
+
+lodash.assignin@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2"
+
+lodash.clonedeep@^4.3.2, lodash.clonedeep@^4.4.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.debounce@^3.0.1, lodash.debounce@^3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-3.1.1.tgz#812211c378a94cc29d5aa4e3346cf0bfce3a7df5"
+  dependencies:
+    lodash._getnative "^3.0.0"
+
+lodash.find@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.find/-/lodash.find-4.6.0.tgz#cb0704d47ab71789ffa0de8b97dd926fb88b13b1"
+
+lodash.flatten@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-3.0.2.tgz#de1cf57758f8f4479319d35c3e9cc60c4501938c"
+  dependencies:
+    lodash._baseflatten "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2, lodash.merge@^3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.merge@^4.3.0, lodash.merge@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.0.tgz#69884ba144ac33fe699737a6086deffadd0f89c5"
+
+lodash.omit@^4.1.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.uniq@^4.2.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
+
+lodash.uniqby@^4.7.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz#d99c07a669e9e6d24e1362dfe266c67616af1302"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.10.0, lodash@^3.3.1, lodash@^3.6.0, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.0, lodash@^4.14.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+lodash@~4.16.4:
+  version "4.16.6"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.16.6.tgz#d22c9ac660288f3843e16ba7d2b5d06cca27d777"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loud-rejection@^1.0.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f"
+  dependencies:
+    currently-unhandled "^0.4.1"
+    signal-exit "^3.0.0"
+
+lowercase-keys@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306"
+
+lru-cache@2, lru-cache@^2.3.0, lru-cache@^2.5.0, lru-cache@~2.7.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+lru-queue@0.1:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lru-queue/-/lru-queue-0.1.0.tgz#2738bd9f0d3cf4f84490c5736c48699ac632cda3"
+  dependencies:
+    es5-ext "~0.10.2"
+
+make-array@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/make-array/-/make-array-0.1.2.tgz#335e36ebb0c5a43154d21213a1ecaeae2a1bb3ef"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+map-obj@^1.0.0, map-obj@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
+
+markdown-it-terminal@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.3.tgz#c77a8533c2170b46d2a907a3c3452d4d7f4aa5db"
+  dependencies:
+    ansi-styles "^2.1.0"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash.merge "^3.3.2"
+    markdown-it "^4.4.0"
+
+markdown-it@4.3.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.3.0.tgz#0ee2b0724079d186b3f04b7345ce395ae47cc474"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+markdown-it@^4.4.0:
+  version "4.4.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.4.0.tgz#3df373dbea587a9a7fef3e56311b68908f75c414"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memoizee@~0.3.8:
+  version "0.3.10"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.3.10.tgz#4eca0d8aed39ec9d017f4c5c2f2f6432f42e5c8f"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.11"
+    es6-weak-map "~0.1.4"
+    event-emitter "~0.3.4"
+    lru-queue "0.1"
+    next-tick "~0.2.2"
+    timers-ext "0.1"
+
+memory-streams@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/memory-streams/-/memory-streams-0.1.2.tgz#273ff777ab60fec599b116355255282cca2c50c2"
+  dependencies:
+    readable-stream "~1.0.2"
+
+meow@^3.7.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb"
+  dependencies:
+    camelcase-keys "^2.0.0"
+    decamelize "^1.1.2"
+    loud-rejection "^1.0.0"
+    map-obj "^1.0.1"
+    minimist "^1.1.3"
+    normalize-package-data "^2.3.4"
+    object-assign "^4.0.1"
+    read-pkg-up "^1.0.1"
+    redent "^1.0.0"
+    trim-newlines "^1.0.0"
+
+merge-defaults@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/merge-defaults/-/merge-defaults-0.2.1.tgz#dd42248eb96bb6a51521724321c72ff9583dde80"
+  dependencies:
+    lodash "~2.4.1"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3, merge@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+"mime-db@>= 1.27.0 < 2", mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-db@~1.12.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.12.0.tgz#3d0c63180f458eb10d325aaa37d7c58ae312e9d7"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime-types@~2.0.1, mime-types@~2.0.3:
+  version "2.0.14"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.0.14.tgz#310e159db23e077f8bb22b748dabfa4957140aa6"
+  dependencies:
+    mime-db "~1.12.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.0, minimatch@~3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1, minimist@^1.1.3, minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.5.0, "mkdirp@>=0.5 0", mkdirp@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@0.5.x, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mkpath@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/mkpath/-/mkpath-0.1.0.tgz#7554a6f8d871834cc97b5462b122c4c124d6de91"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+moment-timezone@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.3.1.tgz#3ef47856b02d53b718a10a5ec2023aa299e07bf5"
+  dependencies:
+    moment ">= 2.6.0"
+
+"moment@>= 2.6.0":
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@^0.11.0:
+  version "0.11.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99"
+
+mout@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501"
+
+mout@~0.9.0:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.9.1.tgz#84f0f3fd6acc7317f63de2affdcc0cee009b0477"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^2.2.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-2.3.0.tgz#4028f7778b17708a489930a6e52ac3bca0da41d0"
+
+mute-stream@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+mute-stream@0.0.5, mute-stream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.5.tgz#8fbfabb0a98a253d3184331f9e8deb7372fac6c0"
+
+nan@^2.3.2:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+next-tick@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c"
+
+next-tick@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.2.2.tgz#75da4a927ee5887e39065880065b7336413b310d"
+
+node-gyp@^3.3.1:
+  version "3.6.0"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.6.0.tgz#7474f63a3a0501161dda0b6341f022f14c423fa6"
+  dependencies:
+    fstream "^1.0.0"
+    glob "^7.0.3"
+    graceful-fs "^4.1.2"
+    minimatch "^3.0.2"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2 || 3 || 4"
+    osenv "0"
+    request "2"
+    rimraf "2"
+    semver "~5.3.0"
+    tar "^2.0.0"
+    which "1"
+
+node-gyp@~3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.0.3.tgz#9b004219f4fa9efbfd78c5fc674aa12e58fb8694"
+  dependencies:
+    fstream "^1.0.0"
+    glob "3 || 4"
+    graceful-fs "^4.1.2"
+    minimatch "1"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1"
+    osenv "0"
+    path-array "^1.0.0"
+    request "2"
+    rimraf "2"
+    semver "2.x || 3.x || 4 || 5"
+    tar "^1.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-modules-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8"
+
+node-notifier@^5.0.1:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.1.2.tgz#2fa9e12605fa10009d44549d6fcd8a63dde0e4ff"
+  dependencies:
+    growly "^1.3.0"
+    semver "^5.3.0"
+    shellwords "^0.1.0"
+    which "^1.2.12"
+
+node-sass@^3.8.0:
+  version "3.13.1"
+  resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-3.13.1.tgz#7240fbbff2396304b4223527ed3020589c004fc2"
+  dependencies:
+    async-foreach "^0.1.3"
+    chalk "^1.1.1"
+    cross-spawn "^3.0.0"
+    gaze "^1.0.0"
+    get-stdin "^4.0.1"
+    glob "^7.0.3"
+    in-publish "^2.0.0"
+    lodash.assign "^4.2.0"
+    lodash.clonedeep "^4.3.2"
+    meow "^3.7.0"
+    mkdirp "^0.5.1"
+    nan "^2.3.2"
+    node-gyp "^3.3.1"
+    npmlog "^4.0.0"
+    request "^2.61.0"
+    sass-graph "^2.1.1"
+
+node-status-codes@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/node-status-codes/-/node-status-codes-1.0.0.tgz#5ae5541d024645d32a58fcddc9ceecea7ae3ac2f"
+
+node-uuid@^1.4.3, node-uuid@~1.4.0, node-uuid@~1.4.3:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@^3.0.3, nopt@~3.0.4:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+nopt@~1.0.10:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, normalize-package-data@^2.3.2, normalize-package-data@^2.3.4, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-install-checks@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@~4.0.2:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.0.2.tgz#3f28235f9f6428e54bfeca73629e27d6c81a7e82"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-package-arg@^4.1.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.2.1.tgz#593303fdea85f7c422775f17f9eb7670f680e3ec"
+  dependencies:
+    hosted-git-info "^2.1.5"
+    semver "^5.1.0"
+
+npm-registry-client@~7.0.7:
+  version "7.0.9"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.0.9.tgz#1baf86ee5285c4e6d38d4556208ded56049231bb"
+  dependencies:
+    chownr "^1.0.1"
+    concat-stream "^1.4.6"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.0"
+    request "^2.47.0"
+    retry "^0.8.0"
+    rimraf "2"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0"
+
+npm-user-validate@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@2.14.10:
+  version "2.14.10"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.14.10.tgz#96597ee1e5efeebdcf5f91b57763fe9ae17b9903"
+  dependencies:
+    abbrev "~1.0.7"
+    ansi "~0.3.0"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.8"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.1"
+    columnify "~1.5.2"
+    config-chain "~1.1.9"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.7"
+    fs-write-stream-atomic "~1.0.4"
+    fstream "~1.0.8"
+    fstream-npm "~1.0.7"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~5.0.15"
+    graceful-fs "~4.1.2"
+    hosted-git-info "~2.1.4"
+    inflight "~1.0.4"
+    inherits "~2.0.1"
+    ini "~1.3.4"
+    init-package-json "~1.9.1"
+    lockfile "~1.0.1"
+    lru-cache "~2.7.0"
+    minimatch "~3.0.0"
+    mkdirp "~0.5.1"
+    node-gyp "~3.0.3"
+    nopt "~3.0.4"
+    normalize-git-url "~3.0.1"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.6"
+    npm-package-arg "~4.0.2"
+    npm-registry-client "~7.0.7"
+    npm-user-validate "~0.1.2"
+    npmlog "~2.0.0"
+    once "~1.3.2"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.2"
+    readable-stream "~1.1.13"
+    realize-package-specifier "~3.0.1"
+    request "~2.65.0"
+    retry "~0.8.0"
+    rimraf "~2.4.3"
+    semver "~5.0.3"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~1.0.0"
+    spdx "~0.4.1"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.0"
+    wrappy "~1.0.1"
+    write-file-atomic "~1.1.3"
+
+"npmlog@0 || 1":
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-1.2.1.tgz#28e7be619609b53f7ad1dd300a10d64d716268b6"
+  dependencies:
+    ansi "~0.3.0"
+    are-we-there-yet "~1.0.0"
+    gauge "~1.2.0"
+
+"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+"npmlog@0.1 || 1 || 2", npmlog@~2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.6.0.tgz#7dbeae44f6ca454e1f168451d630746735813ce3"
+
+oauth-sign@~0.8.0, oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0, once@^1.3.1, once@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+onetime@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/onetime/-/onetime-1.1.0.tgz#a1f7838f8314c516f05ecefcbc4ccfe04b4ed789"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+opn@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/opn/-/opn-1.0.2.tgz#b909643346d00a1abc977a8b96f3ce3c53d5cf5f"
+
+optimist@^0.6.1, optimist@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-name@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/os-name/-/os-name-1.0.3.tgz#1b379f64835af7c5a7f498b357cb95215c159edf"
+  dependencies:
+    osx-release "^1.0.0"
+    win-release "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@^0.1.3, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+osenv@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.0.3.tgz#cd6ad8ddb290915ad9e22765576025d411f29cb6"
+
+osx-release@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/osx-release/-/osx-release-1.1.0.tgz#f217911a28136949af1bf9308b241e2737d3cd6c"
+  dependencies:
+    minimist "^1.1.0"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+p-throttler@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/p-throttler/-/p-throttler-0.1.1.tgz#15246409d225d3eefca85c50de710a83a78cca6a"
+  dependencies:
+    q "~0.9.2"
+
+package-json@^2.0.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/package-json/-/package-json-2.4.0.tgz#0d15bd67d1cbbddbb2ca222ff2edb86bcb31a8bb"
+  dependencies:
+    got "^5.0.0"
+    registry-auth-token "^3.0.1"
+    registry-url "^3.0.3"
+    semver "^5.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.1.0, parse-json@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-array@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-array/-/path-array-1.0.1.tgz#7e2f0f35f07a2015122b868b7eac0eb2c4fec271"
+  dependencies:
+    array-index "^1.0.0"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-exists@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
+  dependencies:
+    pinkie-promise "^2.0.0"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+path-type@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
+  dependencies:
+    graceful-fs "^4.1.2"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+pify@^2.0.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+pleasant-progress@^1.0.2:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/pleasant-progress/-/pleasant-progress-1.1.0.tgz#c99cd730a2e50cffdd3badff845fc4d5282e266b"
+
+portfinder@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-0.4.0.tgz#a3ffadffafe4fb98e0601a85eda27c27ce84ca1e"
+  dependencies:
+    async "0.9.0"
+    mkdirp "0.5.x"
+
+prepend-http@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+printf@^0.2.3:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/printf/-/printf-0.2.5.tgz#c438ca2ca33e3927671db4ab69c0e52f936a4f0f"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+process-relative-require@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/process-relative-require/-/process-relative-require-1.0.0.tgz#1590dfcf5b8f2983ba53e398446b68240b4cc68a"
+  dependencies:
+    node-modules-path "^1.0.0"
+
+promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promise@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/promise/-/promise-7.1.1.tgz#489654c692616b8aa55b0724fa809bb7db49c5bf"
+  dependencies:
+    asap "~2.0.3"
+
+promptly@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/promptly/-/promptly-0.2.0.tgz#73ef200fa8329d5d3a8df41798950b8646ca46d9"
+  dependencies:
+    read "~1.0.4"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+prr@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+pump@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.2.tgz#3b3ee6512f94f0e575538c17995f9f16990a5d51"
+  dependencies:
+    end-of-stream "^1.1.0"
+    once "^1.3.1"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+q@~0.9.2:
+  version "0.9.7"
+  resolved "https://registry.yarnpkg.com/q/-/q-0.9.7.tgz#4de2e6cb3b29088c9e4cbc03bf9d42fb96ce2f75"
+
+qs@5.2.0:
+  version "5.2.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.0.tgz#a9f31142af468cb72b25b30136ba2456834916be"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~2.3.1:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404"
+
+qs@~5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.1.0.tgz#4d932e5c7ea411cca76a312d39a606200fd50cd9"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+quick-temp@0.1.5, quick-temp@^0.1.0, quick-temp@^0.1.2, quick-temp@^0.1.3:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.5.tgz#0d0d67f0fb6a589a0e142f90985f76cdbaf403f7"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+qunitjs@^1.20.0:
+  version "1.23.1"
+  resolved "https://registry.yarnpkg.com/qunitjs/-/qunitjs-1.23.1.tgz#1971cf97ac9be01a64d2315508d2e48e6fd4e719"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.1.5:
+  version "2.1.7"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.1.7.tgz#adfeace2e4fb3098058014d08c072dcc59758774"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.13"
+    unpipe "1.0.0"
+
+rc@^1.0.1, rc@^1.1.6:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-all-stream@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/read-all-stream/-/read-all-stream-3.1.0.tgz#35c3e177f2078ef789ee4bfafa4373074eaef4fa"
+  dependencies:
+    pinkie-promise "^2.0.0"
+    readable-stream "^2.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.2:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read-pkg-up@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02"
+  dependencies:
+    find-up "^1.0.0"
+    read-pkg "^1.0.0"
+
+read-pkg@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28"
+  dependencies:
+    load-json-file "^1.0.0"
+    normalize-package-data "^2.3.2"
+    path-type "^1.0.0"
+
+read@1, read@~1.0.1, read@~1.0.4, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+readable-stream@1.1, readable-stream@^1.1.8, readable-stream@~1.1.13:
+  version "1.1.14"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@^2, readable-stream@^2.0.0, readable-stream@^2.0.2, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+"readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.5, readable-stream@^2.0.6, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2, readable-stream@~1.0.26:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@0.1.1, readline2@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+readline2@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-1.0.1.tgz#41059608ffc154757b715d9989d199ffbf372e35"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    mute-stream "0.0.5"
+
+realize-package-specifier@~3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17, recast@^0.11.3:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redent@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde"
+  dependencies:
+    indent-string "^2.1.0"
+    strip-indent "^1.0.1"
+
+redeyed@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.4.4.tgz#37e990a6f2b21b2a11c2e6a48fd4135698cba97f"
+  dependencies:
+    esprima "~1.0.4"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+registry-auth-token@^3.0.1:
+  version "3.3.0"
+  resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-3.3.0.tgz#57ae67347e73d96345ed1bc01294c7237c02aa63"
+  dependencies:
+    rc "^1.1.6"
+    safe-buffer "^5.0.1"
+
+registry-url@^3.0.3:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942"
+  dependencies:
+    rc "^1.0.1"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request-progress@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-0.3.1.tgz#0721c105d8a96ac6b2ce8b2c89ae2d5ecfcf6b3a"
+  dependencies:
+    throttleit "~0.0.2"
+
+request-replay@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/request-replay/-/request-replay-0.2.0.tgz#9b693a5d118b39f5c596ead5ed91a26444057f60"
+  dependencies:
+    retry "~0.6.0"
+
+request@2, request@^2.47.0, request@~2.65.0:
+  version "2.65.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.65.0.tgz#cc1a3bc72b96254734fc34296da322f9486ddeba"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~0.11.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.3"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+request@2.53.0, request@^2.27.0, request@^2.40.0, request@^2.51.0:
+  version "2.53.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.53.0.tgz#180a3ae92b7b639802e4f9545dd8fcdeb71d760c"
+  dependencies:
+    aws-sign2 "~0.5.0"
+    bl "~0.9.0"
+    caseless "~0.9.0"
+    combined-stream "~0.0.5"
+    forever-agent "~0.5.0"
+    form-data "~0.2.0"
+    hawk "~2.3.0"
+    http-signature "~0.10.0"
+    isstream "~0.1.1"
+    json-stringify-safe "~5.0.0"
+    mime-types "~2.0.1"
+    node-uuid "~1.4.0"
+    oauth-sign "~0.6.0"
+    qs "~2.3.1"
+    stringstream "~0.0.4"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@^2.61.0, request@^2.72.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+require-directory@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
+
+require-main-filename@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+restore-cursor@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-1.0.1.tgz#34661f46886327fed2991479152252df92daa541"
+  dependencies:
+    exit-hook "^1.0.0"
+    onetime "^1.0.0"
+
+retry@0.6.1, retry@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.6.1.tgz#fdc90eed943fde11b893554b8cc63d0e899ba918"
+
+retry@^0.8.0, retry@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.0, rimraf@^2.2.8, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.4.4, rimraf@^2.5.2, rimraf@^2.5.3, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rimraf@~2.4.3:
+  version "2.4.5"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da"
+  dependencies:
+    glob "^6.0.1"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.0.6:
+  version "3.0.21"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.0.21.tgz#49c588fe18ef293bcd0ab9f4e6756e6ac433359f"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+run-async@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/run-async/-/run-async-0.1.0.tgz#c8ad4a5e110661e402a7d21b530e009f25f8e389"
+  dependencies:
+    once "^1.3.0"
+
+rx-lite@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-3.1.2.tgz#19ce502ca572665f3b647b10939f97fd1615f102"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+sass-graph@^2.1.1:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.2.tgz#f4d6c95b546ea2a09d14176d0fc1a07ee2b48354"
+  dependencies:
+    glob "^7.0.0"
+    lodash "^4.0.0"
+    scss-tokenizer "^0.2.1"
+    yargs "^6.6.0"
+
+scss-tokenizer@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/scss-tokenizer/-/scss-tokenizer-0.2.1.tgz#07c0cc577bb7ab4d08fd900185adbf4bc844141d"
+  dependencies:
+    js-base64 "^2.1.8"
+    source-map "^0.4.2"
+
+select@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/select/-/select-1.1.2.tgz#0e7350acdec80b1108528786ec1d4418d11b396d"
+
+semver-diff@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36"
+  dependencies:
+    semver "^5.0.3"
+
+semver-utils@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/semver-utils/-/semver-utils-1.1.1.tgz#27d92fec34d27cfa42707d3b40d025ae9855f2df"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.0.1, semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@~5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-2.3.2.tgz#b9848f25d6cf36333073ec9ef8856d42f1233e52"
+
+semver@^4.1.0, semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.0.3:
+  version "5.0.3"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.0.3.tgz#77466de589cd5d3c95f138aa78bc569a3cb5d27a"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@^2.0.0, set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-command@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
+  dependencies:
+    shebang-regex "^1.0.0"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shell-quote@^1.4.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767"
+  dependencies:
+    array-filter "~0.0.0"
+    array-map "~0.0.0"
+    array-reduce "~0.0.0"
+    jsonify "~0.0.0"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+shellwords@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.0.tgz#66afd47b6a12932d9071cbfd98a52e785cd0ba14"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+silent-error@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.0.1.tgz#71b7d503d1c6f94882b51b56be879b113cb4822c"
+  dependencies:
+    debug "^2.2.0"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.6.0.tgz#5b668f4f771304dfeed179064708386fa6717853"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.0"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.6.0.tgz#3e40d932637e6bd923981b25caf7c53e83b6e2e1"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.0"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.6.0"
+    socket.io-parser "2.3.1"
+
+sorted-object@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-1.0.0.tgz#5d1f4f9c1fb2cd48965967304e212eb44cfb6d05"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.4.2, source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@^0.5.3, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+spawn-args@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/spawn-args/-/spawn-args-0.2.0.tgz#fb7d0bd1d70fd4316bd9e3dec389e65f9d6361bb"
+
+spawnback@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/spawnback/-/spawnback-1.0.0.tgz#f73662f7e54d95367eca74d6426c677dd7ea686f"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.0, spdx-license-ids@^1.0.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+spdx@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/spdx/-/spdx-0.4.3.tgz#ab373c3fcf7b84ffd8fdeb0592d24ff0d14812e4"
+  dependencies:
+    spdx-license-ids "^1.0.0"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sri-toolbox@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+statuses@1, "statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1, string-width@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringify-object@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-1.0.1.tgz#86d35e7dbfbce9aa45637d7ecdd7847e159db8a2"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-indent@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2"
+  dependencies:
+    get-stdin "^4.0.1"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+tap-parser@^5.1.0:
+  version "5.3.3"
+  resolved "https://registry.yarnpkg.com/tap-parser/-/tap-parser-5.3.3.tgz#53ec8a90f275d6fff43f169e56a679502a741185"
+  dependencies:
+    events-to-array "^1.0.1"
+    js-yaml "^3.2.7"
+  optionalDependencies:
+    readable-stream "^2"
+
+tar-fs@^1.4.1:
+  version "1.15.2"
+  resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-1.15.2.tgz#761f5b32932c7b39461a60d537faea0d8084830c"
+  dependencies:
+    chownr "^1.0.1"
+    mkdirp "^0.5.1"
+    pump "^1.0.0"
+    tar-stream "^1.1.2"
+
+tar-stream@^1.1.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.5.2.tgz#fbc6c6e83c1a19d4cb48c7d96171fc248effc7bf"
+  dependencies:
+    bl "^1.0.0"
+    end-of-stream "^1.0.0"
+    readable-stream "^2.0.0"
+    xtend "^4.0.0"
+
+tar@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-1.0.3.tgz#15bcdab244fa4add44e4244a0176edb8aa9a2b44"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+tar@^2.0.0, tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.3:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.3.tgz#e0c6bc4d26b903124410e4fed81103014dfc1f59"
+  dependencies:
+    os-tmpdir "^1.0.0"
+    rimraf "~2.2.6"
+
+testem@^1.0.0-rc.4:
+  version "1.16.0"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-1.16.0.tgz#3933040b5d5b5fbdb6a2b1e7032e511b54a05867"
+  dependencies:
+    backbone "^1.1.2"
+    bluebird "^3.4.6"
+    charm "^1.0.0"
+    commander "^2.6.0"
+    consolidate "^0.14.0"
+    cross-spawn "^5.1.0"
+    express "^4.10.7"
+    fireworm "^0.7.0"
+    glob "^7.0.4"
+    http-proxy "^1.13.1"
+    js-yaml "^3.2.5"
+    lodash.assignin "^4.1.0"
+    lodash.clonedeep "^4.4.1"
+    lodash.find "^4.5.1"
+    lodash.uniqby "^4.7.0"
+    mkdirp "^0.5.1"
+    mustache "^2.2.1"
+    node-notifier "^5.0.1"
+    npmlog "^4.0.0"
+    printf "^0.2.3"
+    rimraf "^2.4.4"
+    socket.io "1.6.0"
+    spawn-args "^0.2.0"
+    styled_string "0.0.1"
+    tap-parser "^5.1.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+throttleit@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-0.0.2.tgz#cfedf88e60c00dd9697b61fdd2a8343a9b680eaf"
+
+through@^2.3.6, through@~2.3.4, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+timed-out@^3.0.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-3.1.3.tgz#95860bfcc5c76c277f8f8326fd0f5b2e20eba217"
+
+timers-ext@0.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.2.tgz#61cc47a76c1abd3195f14527f978d58ae94c5204"
+  dependencies:
+    es5-ext "~0.10.14"
+    next-tick "1"
+
+tiny-emitter@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-1.2.0.tgz#6dc845052cb08ebefc1874723b58f24a648c3b6f"
+
+tiny-lr@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.2.1.tgz#b3fdba802e5d56a33c2f6f10794b32e477ac729d"
+  dependencies:
+    body-parser "~1.14.0"
+    debug "~2.2.0"
+    faye-websocket "~0.10.0"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~5.1.0"
+
+tmp@0.0.24:
+  version "0.0.24"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.24.tgz#d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+touch@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/touch/-/touch-0.0.3.tgz#51aef3d449571d4f287a5d87c9c8b49181a0db1d"
+  dependencies:
+    nopt "~1.0.10"
+
+tough-cookie@>=0.12.0, tough-cookie@^2.0.0, tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+"traverse@>=0.3.0 <0.4":
+  version "0.3.9"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.3.9.tgz#717b8f220cc0bb7b44e40514c22b2e8bbc70d8b9"
+
+trim-newlines@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613"
+
+trim-right@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.0, tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.6.10, type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0, uc.micro@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.6, uglify-js@^2.7.0:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+untildify@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0"
+  dependencies:
+    os-homedir "^1.0.0"
+
+unzip-response@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-1.0.2.tgz#b984f0877fc0a89c2c773cc1ef7b5b232b5b06fe"
+
+update-notifier@^0.6.0:
+  version "0.6.3"
+  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-0.6.3.tgz#776dec8daa13e962a341e8a1d98354306b67ae08"
+  dependencies:
+    boxen "^0.3.1"
+    chalk "^1.0.0"
+    configstore "^2.0.0"
+    is-npm "^1.0.0"
+    latest-version "^2.0.0"
+    semver-diff "^2.0.0"
+
+url-parse-lax@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73"
+  dependencies:
+    prepend-http "^1.0.1"
+
+user-home@^1.0.0, user-home@^1.1.0, user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.5, walk-sync@^0.2.6:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.5.1:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f"
+
+which@1, which@^1.0.8, which@^1.2.12, which@^1.2.9, which@~1.2.0:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+widest-line@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-1.0.0.tgz#0c09c85c2a94683d0d7eaf8ee097d564bf0e105c"
+  dependencies:
+    string-width "^1.0.1"
+
+win-release@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/win-release/-/win-release-1.1.1.tgz#5fa55e02be7ca934edfc12665632e849b72e5209"
+  dependencies:
+    semver "^5.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrap-ansi@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+
+wrappy@1, wrappy@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@^1.1.2:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.3.4.tgz#f807a4f0b1d9e913ae7a48112e6cc3af1991b45f"
+  dependencies:
+    graceful-fs "^4.1.11"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+write-file-atomic@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-1.0.1.tgz#14ff8f63a4fdbcb05d5b6eea22b36f3033b9f04e"
+  dependencies:
+    user-home "^1.0.0"
+
+xdg-basedir@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-2.0.0.tgz#edbc903cc385fc04523d966a335504b5504d1bd2"
+  dependencies:
+    os-homedir "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0, y18n@^3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.18.tgz#e5cab771f0fc80ca599814cb9c269cb8bff00e2c"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.16.3"
+    lodash.merge "^3.0.2"
+
+yargs-parser@^4.2.0:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-4.2.1.tgz#29cceac0dc4f03c6c87b4a9f217dd18c9f74871c"
+  dependencies:
+    camelcase "^3.0.0"
+
+yargs@^6.6.0:
+  version "6.6.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-6.6.0.tgz#782ec21ef403345f830a808ca3d513af56065208"
+  dependencies:
+    camelcase "^3.0.0"
+    cliui "^3.2.0"
+    decamelize "^1.1.1"
+    get-caller-file "^1.0.1"
+    os-locale "^1.4.0"
+    read-pkg-up "^1.0.1"
+    require-directory "^2.1.1"
+    require-main-filename "^1.0.1"
+    set-blocking "^2.0.0"
+    string-width "^1.0.2"
+    which-module "^1.0.0"
+    y18n "^3.2.1"
+    yargs-parser "^4.2.0"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/hawq/pom.xml b/contrib/views/hawq/pom.xml
index 8089d68..0c6591e 100644
--- a/contrib/views/hawq/pom.xml
+++ b/contrib/views/hawq/pom.xml
@@ -133,29 +133,35 @@
                 <!-- Building frontend -->
                 <groupId>com.github.eirslett</groupId>
                 <artifactId>frontend-maven-plugin</artifactId>
-                <version>1.3</version>
+                <version>1.4</version>
                 <configuration>
                     <nodeVersion>v4.5.0</nodeVersion>
-                    <npmVersion>2.15.0</npmVersion>
+                    <yarnVersion>v0.23.2</yarnVersion>
                     <workingDirectory>${ui.dir}</workingDirectory>
                     <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+                    <!-- Setting the npm_config_tmp environment variable is a workaround for
+                       https://github.com/Medium/phantomjs/issues/673 -->
+                    <environmentVariables>
+                      <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+                    </environmentVariables>
                 </configuration>
                 <executions>
                     <execution>
-                        <id>install node and npm</id>
+                        <id>install node and yarn</id>
                         <phase>generate-sources</phase>
                         <goals>
-                            <goal>install-node-and-npm</goal>
+                            <goal>install-node-and-yarn</goal>
                         </goals>
                     </execution>
                     <execution>
-                        <id>npm install</id>
+                        <id>yarn install</id>
                         <phase>generate-sources</phase>
                         <goals>
-                            <goal>npm</goal>
+                            <goal>yarn</goal>
                         </goals>
                         <configuration>
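+                            <!-- only one <arguments> element takes effect per execution, so all yarn flags are combined into a single string -->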
-                            <arguments>install --unsafe-perm</arguments>
+                            <arguments>install --unsafe-perm --ignore-engines</arguments>
                         </configuration>
                     </execution>
                 </executions>
@@ -262,4 +268,4 @@
         </resources>
 
     </build>
-</project>
\ No newline at end of file
+</project>
diff --git a/contrib/views/hawq/src/main/resources/ui/yarn.lock b/contrib/views/hawq/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..108056b
--- /dev/null
+++ b/contrib/views/hawq/src/main/resources/ui/yarn.lock
@@ -0,0 +1,6665 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1, abbrev@^1.0.5:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.5.tgz#76962dac876ed3311b05d29c6a58c14e1ef3304b"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amd-name-resolver@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.6.tgz#d3e4ba2dfcaab1d820c1be9de947c67828cfe595"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-escapes@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-1.4.0.tgz#d3a8a83b319aa67793662b13e761c7911422306e"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@1.0.0, archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-filter@~0.0.0:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-map@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662"
+
+array-reduce@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b"
+
+array-to-error@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-to-error/-/array-to-error-1.1.1.tgz#d68812926d14097a205579a667eeaf1856a44c07"
+  dependencies:
+    array-to-sentence "^1.1.0"
+
+array-to-sentence@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/array-to-sentence/-/array-to-sentence-1.1.0.tgz#c804956dafa53232495b205a9452753a258d39fc"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-foreach@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@^0.2.8, async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@^1.4.0, async@^1.4.2, async@^1.5.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+async@~0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-code-frame@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4"
+  dependencies:
+    chalk "^1.1.0"
+    esutils "^2.0.2"
+    js-tokens "^3.0.0"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-core@^6.14.0, babel-core@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.24.1.tgz#8c428564dce1e1f41fb337ec34f4c3b022b5ad83"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-generator "^6.24.1"
+    babel-helpers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-register "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    convert-source-map "^1.1.0"
+    debug "^2.1.1"
+    json5 "^0.5.0"
+    lodash "^4.2.0"
+    minimatch "^3.0.2"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+
+babel-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.24.1.tgz#e715f486c58ded25649d888944d52aa07c5d9497"
+  dependencies:
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    detect-indent "^4.0.0"
+    jsesc "^1.3.0"
+    lodash "^4.2.0"
+    source-map "^0.5.0"
+    trim-right "^1.0.1"
+
+babel-helper-builder-binary-assignment-operator-visitor@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz#cce4517ada356f4220bcae8a02c2b346f9a56664"
+  dependencies:
+    babel-helper-explode-assignable-expression "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-call-delegate@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-define-map@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-explode-assignable-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz#f25b82cf7dc10433c55f70592d5746400ac22caa"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9"
+  dependencies:
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-get-function-arity@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-hoist-variables@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-optimise-call-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-remap-async-to-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz#5ec581827ad723fecdd381f1c928390676e4551b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-replace-supers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a"
+  dependencies:
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helpers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-messages@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-check-es2015-constants@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-debug-macros@^0.1.6:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-debug-macros/-/babel-plugin-debug-macros-0.1.7.tgz#69f5a3dc7d72f781354f18c611a3b007bb223511"
+  dependencies:
+    semver "^5.3.0"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-feature-flags@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-feature-flags/-/babel-plugin-feature-flags-0.3.1.tgz#9c827cf9a4eb9a19f725ccb239e85cab02036fc1"
+
+babel-plugin-filter-imports@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-0.3.1.tgz#e7859b56886b175dd2616425d277b219e209ea8b"
+
+babel-plugin-htmlbars-inline-precompile@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.1.0.tgz#b784723bd1f108796b56faf9f1c05eb5ca442983"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-syntax-async-functions@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95"
+
+babel-plugin-syntax-exponentiation-operator@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz#9ee7e8337290da95288201a6a57f4170317830de"
+
+babel-plugin-syntax-trailing-function-commas@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz#ba0360937f8d06e40180a43fe0d5616fff532cf3"
+
+babel-plugin-transform-async-to-generator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz#6536e378aff6cb1d5517ac0e40eb3e9fc8d08761"
+  dependencies:
+    babel-helper-remap-async-to-generator "^6.24.1"
+    babel-plugin-syntax-async-functions "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-arrow-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoping@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-plugin-transform-es2015-classes@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db"
+  dependencies:
+    babel-helper-define-map "^6.24.1"
+    babel-helper-function-name "^6.24.1"
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-helper-replace-supers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-computed-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-destructuring@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-duplicate-keys@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-for-of@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-function-name@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-modules-amd@^6.22.0, babel-plugin-transform-es2015-modules-amd@^6.24.0, babel-plugin-transform-es2015-modules-amd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154"
+  dependencies:
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-commonjs@^6.23.0, babel-plugin-transform-es2015-modules-commonjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe"
+  dependencies:
+    babel-plugin-transform-strict-mode "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-modules-systemjs@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-umd@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468"
+  dependencies:
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-object-super@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d"
+  dependencies:
+    babel-helper-replace-supers "^6.24.1"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-parameters@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b"
+  dependencies:
+    babel-helper-call-delegate "^6.24.1"
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-shorthand-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-spread@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-sticky-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-template-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-typeof-symbol@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-unicode-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    regexpu-core "^2.0.0"
+
+babel-plugin-transform-exponentiation-operator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz#2ab0c9c7f3098fa48907772bb813fe41e8de3a0e"
+  dependencies:
+    babel-helper-builder-binary-assignment-operator-visitor "^6.24.1"
+    babel-plugin-syntax-exponentiation-operator "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-regenerator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418"
+  dependencies:
+    regenerator-transform "0.9.11"
+
+babel-plugin-transform-strict-mode@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babel-polyfill@^6.16.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.23.0.tgz#8364ca62df8eafb830499f699177466c3b03499d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-preset-env@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/babel-preset-env/-/babel-preset-env-1.4.0.tgz#c8e02a3bcc7792f23cded68e0355b9d4c28f0f7a"
+  dependencies:
+    babel-plugin-check-es2015-constants "^6.22.0"
+    babel-plugin-syntax-trailing-function-commas "^6.22.0"
+    babel-plugin-transform-async-to-generator "^6.22.0"
+    babel-plugin-transform-es2015-arrow-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoped-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoping "^6.23.0"
+    babel-plugin-transform-es2015-classes "^6.23.0"
+    babel-plugin-transform-es2015-computed-properties "^6.22.0"
+    babel-plugin-transform-es2015-destructuring "^6.23.0"
+    babel-plugin-transform-es2015-duplicate-keys "^6.22.0"
+    babel-plugin-transform-es2015-for-of "^6.23.0"
+    babel-plugin-transform-es2015-function-name "^6.22.0"
+    babel-plugin-transform-es2015-literals "^6.22.0"
+    babel-plugin-transform-es2015-modules-amd "^6.22.0"
+    babel-plugin-transform-es2015-modules-commonjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-systemjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-umd "^6.23.0"
+    babel-plugin-transform-es2015-object-super "^6.22.0"
+    babel-plugin-transform-es2015-parameters "^6.23.0"
+    babel-plugin-transform-es2015-shorthand-properties "^6.22.0"
+    babel-plugin-transform-es2015-spread "^6.22.0"
+    babel-plugin-transform-es2015-sticky-regex "^6.22.0"
+    babel-plugin-transform-es2015-template-literals "^6.22.0"
+    babel-plugin-transform-es2015-typeof-symbol "^6.23.0"
+    babel-plugin-transform-es2015-unicode-regex "^6.22.0"
+    babel-plugin-transform-exponentiation-operator "^6.22.0"
+    babel-plugin-transform-regenerator "^6.22.0"
+    browserslist "^1.4.0"
+    invariant "^2.2.2"
+
+babel-register@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
+  dependencies:
+    babel-core "^6.24.1"
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    home-or-tmp "^2.0.0"
+    lodash "^4.2.0"
+    mkdirp "^0.5.1"
+    source-map-support "^0.4.2"
+
+babel-runtime@^6.18.0, babel-runtime@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b"
+  dependencies:
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-template@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.24.1.tgz#04ae514f1f93b3a2537f2a0f60a5a45fb8308333"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    lodash "^4.2.0"
+
+babel-traverse@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.24.1.tgz#ab36673fd356f9a0948659e7b338d5feadb31695"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    babylon "^6.15.0"
+    debug "^2.2.0"
+    globals "^9.0.0"
+    invariant "^2.2.0"
+    lodash "^4.2.0"
+
+babel-types@^6.19.0, babel-types@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.24.1.tgz#a136879dc15b3606bda0d90c1fc74304c2ff0975"
+  dependencies:
+    babel-runtime "^6.22.0"
+    esutils "^2.0.2"
+    lodash "^4.2.0"
+    to-fast-properties "^1.0.1"
+
+babel6-plugin-strip-class-callcheck@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-class-callcheck/-/babel6-plugin-strip-class-callcheck-6.0.0.tgz#de841c1abebbd39f78de0affb2c9a52ee228fddf"
+
+babel6-plugin-strip-heimdall@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-heimdall/-/babel6-plugin-strip-heimdall-6.0.1.tgz#35f80eddec1f7fffdc009811dfbd46d9965072b6"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+babylon@^6.11.0, babylon@^6.15.0:
+  version "6.17.0"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.0.tgz#37da948878488b9c4e3c4038893fa3314b3fc932"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/binary/-/binary-0.3.0.tgz#9f60553bc5ce8c3386f3b553cff47462adecaa79"
+  dependencies:
+    buffers "~0.1.1"
+    chainsaw "~0.1.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@^1.0.0, bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+bl@~0.9.0:
+  version "0.9.5"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-0.9.5.tgz#c06b797af085ea00bc527afc8efcf11de2232054"
+  dependencies:
+    readable-stream "~1.0.26"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.1.1, bluebird@^3.4.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-1.4.0.tgz#16c38c1135f8071c19f25938d61b0d8cbf18d3f1"
+  dependencies:
+    graceful-fs "^4.1.3"
+    mout "^1.0.0"
+    optimist "^0.6.1"
+    osenv "^0.1.3"
+    untildify "^2.1.0"
+
+bower-endpoint-parser@0.2.2, bower-endpoint-parser@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-json@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/bower-json/-/bower-json-0.4.0.tgz#a99c3ccf416ef0590ed0ded252c760f1c6d93766"
+  dependencies:
+    deep-extend "~0.2.5"
+    graceful-fs "~2.0.0"
+    intersect "~0.0.3"
+
+bower-logger@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-logger/-/bower-logger-0.2.2.tgz#39be07e979b2fc8e03a94634205ed9422373d381"
+
+bower-registry-client@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/bower-registry-client/-/bower-registry-client-1.0.0.tgz#697c3499067549a106b49f26d03e6dd1017a9241"
+  dependencies:
+    async "^0.2.8"
+    graceful-fs "^4.0.0"
+    lru-cache "^2.3.0"
+    mkdirp "^0.3.5"
+    request "^2.51.0"
+    request-replay "^0.2.0"
+    rimraf "^2.2.0"
+
+bower@1.7.2, bower@^1.3.12:
+  version "1.7.2"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.7.2.tgz#b04228f9970f11777017e64ae39d71f9346c9575"
+  dependencies:
+    abbrev "^1.0.5"
+    archy "1.0.0"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "^0.2.2"
+    bower-json "^0.4.0"
+    bower-logger "^0.2.2"
+    bower-registry-client "^1.0.0"
+    cardinal "0.4.4"
+    chalk "^1.0.0"
+    chmodr "^1.0.2"
+    configstore "^0.3.2"
+    decompress-zip "^0.1.0"
+    destroy "^1.0.3"
+    fs-write-stream-atomic "1.0.5"
+    fstream "^1.0.3"
+    fstream-ignore "^1.0.2"
+    github "^0.2.3"
+    glob "^4.3.2"
+    graceful-fs "^3.0.5"
+    handlebars "^2.0.0"
+    inquirer "0.10.0"
+    insight "^0.7.0"
+    is-root "^1.0.0"
+    junk "^1.0.0"
+    lockfile "^1.0.0"
+    lru-cache "^2.5.0"
+    md5-hex "^1.0.2"
+    mkdirp "0.5.0"
+    mout "^0.11.0"
+    nopt "^3.0.1"
+    opn "^1.0.1"
+    p-throttler "0.1.1"
+    promptly "0.2.0"
+    q "^1.1.2"
+    request "2.53.0"
+    request-progress "0.3.1"
+    retry "0.6.1"
+    rimraf "^2.2.8"
+    semver "^2.3.0"
+    semver-utils "^1.1.1"
+    shell-quote "^1.4.2"
+    stringify-object "^1.0.0"
+    tar-fs "^1.4.1"
+    tmp "0.0.24"
+    update-notifier "^0.6.0"
+    user-home "^1.1.0"
+    which "^1.0.8"
+
+boxen@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/boxen/-/boxen-0.3.1.tgz#a7d898243ae622f7abb6bb604d740a76c6a5461b"
+  dependencies:
+    chalk "^1.1.1"
+    filled-array "^1.0.0"
+    object-assign "^4.0.1"
+    repeating "^2.0.0"
+    string-width "^1.0.1"
+    widest-line "^1.0.0"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.2.0:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0, broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-babel-transpiler@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-6.0.0.tgz#a52c5404bf36236849da503b011fd41fe64a00a2"
+  dependencies:
+    babel-core "^6.14.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^2.0.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-caching-writer@^3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-3.0.3.tgz#0bd2c96a9738d6a6ab590f07ba35c5157d7db476"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.1"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.3.0"
+
+broccoli-clean-css@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-1.1.0.tgz#9db143d9af7e0ae79c26e3ac5a9bb2d720ea19fa"
+  dependencies:
+    broccoli-persistent-filter "^1.1.6"
+    clean-css-promise "^0.1.0"
+    inline-source-map-comment "^1.0.5"
+    json-stable-stringify "^1.0.0"
+
+broccoli-concat@^2.0.4, broccoli-concat@^2.2.0:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-2.3.8.tgz#590cdcc021bb905b6c121d87c2d1d57df44a2a48"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-stew "^1.3.3"
+    fast-sourcemap-concat "^1.0.1"
+    fs-extra "^0.30.0"
+    lodash.merge "^4.3.0"
+    lodash.omit "^4.1.0"
+    lodash.uniq "^4.2.0"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-file-creator@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-file-creator/-/broccoli-file-creator-1.1.1.tgz#1b35b67d215abdfadd8d49eeb69493c39e6c3450"
+  dependencies:
+    broccoli-kitchen-sink-helpers "~0.2.0"
+    broccoli-plugin "^1.1.0"
+    broccoli-writer "~0.1.1"
+    mkdirp "^0.5.1"
+    rsvp "~3.0.6"
+    symlink-or-copy "^1.0.1"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel-reducer@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel-reducer/-/broccoli-funnel-reducer-1.0.0.tgz#11365b2a785aec9b17972a36df87eef24c5cc0ea"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0, broccoli-merge-trees@^1.1.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-sass-source-maps@^1.4.0:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/broccoli-sass-source-maps/-/broccoli-sass-source-maps-1.8.1.tgz#115e32be25dc5f1686af1c8d1fa4c4c62749f0b6"
+  dependencies:
+    broccoli-caching-writer "^3.0.3"
+    include-path-searcher "^0.1.0"
+    mkdirp "^0.3.5"
+    node-sass "^3.8.0"
+    object-assign "^2.0.0"
+    rsvp "^3.0.6"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sri-hash@^2.1.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-2.1.2.tgz#bc69905ed7a381ad325cc0d02ded071328ebf3f3"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.3.3:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-unwatched-tree@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-unwatched-tree/-/broccoli-unwatched-tree-0.1.1.tgz#4312fde04bdafe67a05a967d72cc50b184a9f514"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+browserslist@^1.4.0:
+  version "1.7.7"
+  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9"
+  dependencies:
+    caniuse-db "^1.0.30000639"
+    electron-to-chromium "^1.2.7"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffers@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/buffers/-/buffers-0.1.1.tgz#b24579c3bed4d6d396aeee6d9a8ae7f5482ab7bb"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+camelcase@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+caniuse-db@^1.0.30000639:
+  version "1.0.30000664"
+  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000664.tgz#e16316e5fdabb9c7209b2bf0744ffc8a14201f22"
+
+capture-stack-trace@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d"
+
+cardinal@0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.4.4.tgz#ca5bb68a5b511b90fe93b9acea49bdee5c32bfe2"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.4.0"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+caseless@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.9.0.tgz#b7b65ce6bf1413886539cfd533f0b30effa9cf88"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chainsaw@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/chainsaw/-/chainsaw-0.1.0.tgz#5eab50b28afe58074d0d58291388828b5e5fbc98"
+  dependencies:
+    traverse ">=0.3.0 <0.4"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@^1.0.2, chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css-promise@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/clean-css-promise/-/clean-css-promise-0.1.1.tgz#43f3d2c8dfcb2bf071481252cd9b76433c08eecb"
+  dependencies:
+    array-to-error "^1.0.0"
+    clean-css "^3.4.5"
+    pinkie-promise "^2.0.0"
+
+clean-css@^3.4.5:
+  version "3.4.25"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.25.tgz#9e9a52d5c1e6bc5123e1b2783fa65fe958946ede"
+  dependencies:
+    commander "2.8.x"
+    source-map "0.4.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-cursor@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-1.0.2.tgz#64da3f7d56a54412e59794bd62dc35295e8f2987"
+  dependencies:
+    restore-cursor "^1.0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli-width@^1.0.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-1.1.1.tgz#a4d293ef67ebb7b88d4a4d42c0ccf00c4d1e366d"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+cliui@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wrap-ansi "^2.0.0"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.4:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4, combined-stream@~0.0.5:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@2.8.x:
+  version "2.8.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0, concat-stream@^1.4.6:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+config-chain@~1.1.10:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@^0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.3.2.tgz#25e4c16c3768abf75c5a65bc61761f495055b459"
+  dependencies:
+    graceful-fs "^3.0.1"
+    js-yaml "^3.1.0"
+    mkdirp "^0.5.0"
+    object-assign "^2.0.0"
+    osenv "^0.1.0"
+    user-home "^1.0.0"
+    uuid "^2.0.1"
+    xdg-basedir "^1.0.0"
+
+configstore@^1.0.0, configstore@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.4.0.tgz#c35781d0501d268c25c54b8b17f6240e8a4fb021"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+configstore@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-2.1.0.tgz#737a3a7036e9886102aa6099e47bb33ab1aba1a1"
+  dependencies:
+    dot-prop "^3.0.0"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.14.5.tgz#5a25047bc76f73072667c8cb52c989888f494c63"
+  dependencies:
+    bluebird "^3.1.1"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-js@^2.4.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+create-error-class@^3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6"
+  dependencies:
+    capture-stack-trace "^1.0.0"
+
+cross-spawn@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982"
+  dependencies:
+    lru-cache "^4.0.1"
+    which "^1.2.9"
+
+cross-spawn@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449"
+  dependencies:
+    lru-cache "^4.0.1"
+    shebang-command "^1.2.0"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@0.7.4:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@2.2.0, debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+decompress-zip@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/decompress-zip/-/decompress-zip-0.1.0.tgz#bce60c11664f2d660fca4bcf634af6de5d6c14c7"
+  dependencies:
+    binary "^0.3.0"
+    graceful-fs "^3.0.0"
+    mkpath "^0.1.0"
+    nopt "^3.0.1"
+    q "^1.1.2"
+    readable-stream "^1.1.8"
+    touch "0.0.3"
+
+deep-extend@~0.2.5:
+  version "0.2.11"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.2.11.tgz#7a16ba69729132340506170494bc83f7076fe08f"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@^1.0.3, destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detect-indent@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208"
+  dependencies:
+    repeating "^2.0.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+dot-prop@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-3.0.0.tgz#1b708af094a49c9a0e7dbcad790aba539dac1177"
+  dependencies:
+    is-obj "^1.0.0"
+
+duplexer2@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/duplexer2/-/duplexer2-0.1.4.tgz#8b12dab878c0d69e3e7891051662a32fc6bddcc1"
+  dependencies:
+    readable-stream "^2.0.2"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+electron-to-chromium@^1.2.7:
+  version "1.3.8"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.8.tgz#b2c8a2c79bb89fbbfd3724d9555e15095b5f5fb6"
+
+ember-cli-app-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.1.tgz#d135eba75f30e791d8a5e5844f1251dcbcc40438"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.7:
+  version "6.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-6.1.0.tgz#d9c83a7d0c67cc8a3ccb9bd082971c3593e54fad"
+  dependencies:
+    amd-name-resolver "0.0.6"
+    babel-plugin-debug-macros "^0.1.6"
+    babel-plugin-transform-es2015-modules-amd "^6.24.0"
+    babel-polyfill "^6.16.0"
+    babel-preset-env "^1.2.0"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-source "^1.1.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.2.0"
+
+ember-cli-broccoli@0.16.9:
+  version "0.16.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-broccoli/-/ember-cli-broccoli-0.16.9.tgz#4e9128f59ffaee99705c01e9a44a691a0ae199db"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^4.0.4"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+ember-cli-dependency-checker@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.4.0.tgz#2b13f977e1eea843fc1a21a001be6ca5d4ef1942"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "^0.2.0"
+    semver "^4.1.0"
+
+ember-cli-get-component-path-option@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-component-path-option/-/ember-cli-get-component-path-option-1.0.0.tgz#0d7b595559e2f9050abed804f1d8eff1b08bc771"
+
+ember-cli-htmlbars-inline-precompile@^0.3.1:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.6.tgz#4095fe423f93102724c0725e4dd1a31f25e24de5"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "^0.1.0"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    hash-for-dep "^1.0.2"
+
+ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.3.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-node-assets@^0.1.1:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-node-assets/-/ember-cli-node-assets-0.1.6.tgz#6488a2949048c801ad6d9e33753c7bce32fc1146"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.1.1"
+    broccoli-unwatched-tree "^0.1.1"
+    debug "^2.2.0"
+    lodash "^4.5.1"
+    resolve "^1.1.7"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-2.0.0.tgz#45c8b985eba06bb443b3abce1c3c6220fdcb8094"
+  dependencies:
+    broccoli-clean-css "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-pretender@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-pretender/-/ember-cli-pretender-0.5.0.tgz#a77b73a1bffc7f90b9661a500541fac4a227bf37"
+
+ember-cli-qunit@^1.2.1:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.4.2.tgz#7ca25495c70ca347106d44fc00f0d7aeca027475"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-concat "^2.2.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    ember-cli-babel "^5.1.5"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sass@5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-sass/-/ember-cli-sass-5.3.0.tgz#62d4bc634f26ab85f4b0e49a4f9ffccb8841bac9"
+  dependencies:
+    broccoli-merge-trees "^1.1.0"
+    broccoli-sass-source-maps "^1.4.0"
+    ember-cli-version-checker "^1.0.2"
+    merge "^1.2.0"
+
+ember-cli-sri@^2.1.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-2.1.1.tgz#971620934a4b9183cf7923cc03e178b83aa907fd"
+  dependencies:
+    broccoli-sri-hash "^2.1.0"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-valid-component-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-valid-component-name/-/ember-cli-valid-component-name-1.0.0.tgz#71550ce387e0233065f30b30b1510aa2dfbe87ef"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4, ember-cli-version-checker@^1.1.6, ember-cli-version-checker@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@2.4.2:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-2.4.2.tgz#f02a998caae9657627326f6b220c635c89bfcc75"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    bower "^1.3.12"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "0.2.2"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-concat "^2.0.4"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-funnel-reducer "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-viz "^2.0.1"
+    chalk "^1.1.1"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "^1.4.0"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-broccoli "0.16.9"
+    ember-cli-get-component-path-option "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^2.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-valid-component-name "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.26.2"
+    fs-monitor-stack "^1.0.2"
+    fs-tree-diff "^0.4.4"
+    get-caller-file "^1.0.0"
+    git-repo-info "^1.0.4"
+    glob "7.0.3"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.21"
+    lodash "^4.6.1"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.3"
+    merge-defaults "^0.2.1"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.1"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.21"
+    pleasant-progress "^1.0.2"
+    portfinder "^1.0.2"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.5"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rimraf "^2.4.4"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.3"
+    testem "^1.3.0"
+    through "^2.3.6"
+    tiny-lr "0.2.1"
+    tree-sync "^1.1.0"
+    walk-sync "^0.2.6"
+    yam "0.0.18"
+
+ember-data@^2.4.0:
+  version "2.13.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.13.0.tgz#6d61487129de0e72225cc98bbc0d995e2042a933"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    babel-plugin-feature-flags "^0.3.1"
+    babel-plugin-filter-imports "^0.3.1"
+    babel6-plugin-strip-class-callcheck "^6.0.0"
+    babel6-plugin-strip-heimdall "^6.0.1"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-file-creator "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^6.0.0-beta.7"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    ember-inflector "^2.0.0"
+    ember-runtime-enumerable-includes-polyfill "^2.0.0"
+    exists-sync "0.0.3"
+    git-repo-info "^1.1.2"
+    heimdalljs "^0.3.0"
+    inflection "^1.8.0"
+    npm-git-info "^1.0.0"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+
+ember-disable-proxy-controllers@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-disable-proxy-controllers/-/ember-disable-proxy-controllers-1.0.1.tgz#1254eeec0ba025c24eb9e8da611afa7b38754281"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-export-application-global@^1.0.4:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-inflector@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-2.0.0.tgz#ac0870e87c0724bd42cf5ed7ef166c49a296ecfb"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+
+ember-load-initializers@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-load-initializers/-/ember-load-initializers-0.5.1.tgz#76e3db23c111dbdcd3ae6f687036bf0b56be0cbe"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@^2.0.3:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.1.1.tgz#5e4c1fffe9f5f48fc2194ad7592274ed0cd74f72"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-runtime-enumerable-includes-polyfill@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-runtime-enumerable-includes-polyfill/-/ember-runtime-enumerable-includes-polyfill-2.0.0.tgz#6e9ba118bc909d1d7762de1b03a550d8955308a9"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+    ember-cli-version-checker "^1.1.6"
+
+ember-sinon-qunit@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-sinon-qunit/-/ember-sinon-qunit-1.3.0.tgz#21f1f68dbccfbdfbb705261aca39b91f0e226f83"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+    ember-sinon "0.5.0"
+
+ember-sinon@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/ember-sinon/-/ember-sinon-0.5.0.tgz#9c2e3da77ea28df5b9c592fe8a9f0d4a6fe22f57"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+    ember-cli-node-assets "^0.1.1"
+    sinon "^1.17.3"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+end-of-stream@^1.0.0, end-of-stream@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.0.tgz#7a90d833efda6cfa6eac0f4949dbb0fad3a63206"
+  dependencies:
+    once "^1.4.0"
+
+engine.io-client@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.0.tgz#7b730e4127414087596d9be3c88d2bc5fdb6cf5c"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.1"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.1.tgz#9554f1ae33107d6fbd170ca5466d2f833f6a07cf"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.0.tgz#3eeb5f264cb75dbbec1baaea26d61f5a4eace2aa"
+  dependencies:
+    accepts "1.3.3"
+    base64id "0.1.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    ws "1.1.1"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3, escape-string-regexp@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esprima@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.0.4.tgz#9f557e08fc3b4d26ece9dd34f8fbf476b62585ad"
+
+esutils@^2.0.0, esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit-hook@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/exit-hook/-/exit-hook-1.1.1.tgz#f05ca233b48c05d54fff07765df8507e95c02ff8"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+fast-sourcemap-concat@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-1.1.0.tgz#a800767abed5eda02e67238ec063a709be61f9d4"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    fs-extra "^0.30.0"
+    memory-streams "^0.1.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+figures@^1.3.5:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e"
+  dependencies:
+    escape-string-regexp "^1.0.5"
+    object-assign "^4.1.0"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+filled-array@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/filled-array/-/filled-array-1.1.0.tgz#c3c4f6c663b923459a9aa29912d2d031f1507f84"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.7.0:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.7.1.tgz#ccf20f7941f108883fcddb99383dbe6e1861c758"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash.debounce "^3.1.1"
+    lodash.flatten "^3.0.2"
+    minimatch "^3.0.2"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.2.0.tgz#26f8bc26da6440e299cbdcfb69035c4f77a6e466"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime-types "~2.0.3"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+formatio@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/formatio/-/formatio-1.1.1.tgz#5ed3ccd636551097383465d996199100e86161e9"
+  dependencies:
+    samsam "~1.1"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.26.2:
+  version "0.26.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.2.tgz#71b7697e539db037acf41e6e7923e94d605bf498"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-extra@~0.26.4:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4, fs-tree-diff@^0.5.6:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.5.tgz#862a4dabdffcafabfc16499458e37310c39925f6"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+
+fs-write-stream-atomic@~1.0.8:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0, fstream-ignore@^1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@^1.0.3, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+gaze@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/gaze/-/gaze-1.1.2.tgz#847224677adb8870d679257ed3388fdb61e40105"
+  dependencies:
+    globule "^1.0.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.0, get-caller-file@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4, git-repo-info@^1.1.2:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+github@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/github/-/github-0.2.4.tgz#24fa7f0e13fa11b946af91134c51982a91ce538b"
+  dependencies:
+    mime "^1.2.11"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@^4.3.2:
+  version "4.5.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@7.0.3, glob@^7.0.3:
+  version "7.0.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.3.tgz#0aa235931a4a96ac13d60ffac2fb877bd6ed4f58"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^5.0.10, glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.0, glob@^7.0.4, glob@^7.0.5, glob@^7.1.1, glob@~7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globals@^9.0.0:
+  version "9.17.0"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-9.17.0.tgz#0c0ca696d9b9bb694d2e5470bd37777caad50286"
+
+globule@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/globule/-/globule-1.1.0.tgz#c49352e4dc183d85893ee825385eb994bb6df45f"
+  dependencies:
+    glob "~7.1.1"
+    lodash "~4.16.4"
+    minimatch "~3.0.2"
+
+got@^5.0.0:
+  version "5.7.1"
+  resolved "https://registry.yarnpkg.com/got/-/got-5.7.1.tgz#5f81635a61e4a6589f180569ea4e381680a51f35"
+  dependencies:
+    create-error-class "^3.0.1"
+    duplexer2 "^0.1.4"
+    is-redirect "^1.0.0"
+    is-retry-allowed "^1.0.0"
+    is-stream "^1.0.0"
+    lowercase-keys "^1.0.0"
+    node-status-codes "^1.0.0"
+    object-assign "^4.0.1"
+    parse-json "^2.1.0"
+    pinkie-promise "^2.0.0"
+    read-all-stream "^3.0.0"
+    readable-stream "^2.0.5"
+    timed-out "^3.0.0"
+    unzip-response "^1.0.2"
+    url-parse-lax "^1.0.0"
+
+graceful-fs@^3.0.0, graceful-fs@^3.0.1, graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.0.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2, graceful-fs@~4.1.3:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growly@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
+
+handlebars@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-2.0.0.tgz#6e9d7f8514a3467fa5e9f82cc158ecfc1d5ac76f"
+  dependencies:
+    optimist "~0.3"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+handlebars@^4.0.4:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2, har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hasha@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~2.3.0:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-2.3.1.tgz#1e731ce39447fa1d0f6d707f7bceebec0fd1ec1f"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+heimdalljs@^0.3.0:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.3.3.tgz#e92d2c6f77fd46d5bf50b610d28ad31755054d0b"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+home-or-tmp@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@~2.1.4:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
+  dependencies:
+    inherits "~2.0.1"
+    statuses "1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13, iconv-lite@^0.4.5:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iferr@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+in-publish@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51"
+
+include-path-searcher@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/include-path-searcher/-/include-path-searcher-0.1.0.tgz#c0cf2ddfa164fb2eae07bc7ca43a7f191cb4d7bd"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0, inflection@^1.8.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+inherits@2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1"
+
+ini@^1.3.4, ini@~1.3.0, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.3:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inline-source-map-comment@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/inline-source-map-comment/-/inline-source-map-comment-1.0.5.tgz#50a8a44c2a790dfac441b5c94eccd5462635faf6"
+  dependencies:
+    chalk "^1.0.0"
+    get-stdin "^4.0.1"
+    minimist "^1.1.1"
+    sum-up "^1.0.1"
+    xtend "^4.0.0"
+
+inquirer@0.10.0, inquirer@^0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.10.0.tgz#48cd3e23f8d989a52d47dc5e10ec75324387e908"
+  dependencies:
+    ansi-escapes "^1.1.0"
+    ansi-regex "^2.0.0"
+    chalk "^1.0.0"
+    cli-cursor "^1.0.1"
+    cli-width "^1.0.1"
+    figures "^1.3.5"
+    lodash "^3.3.1"
+    readline2 "^1.0.1"
+    run-async "^0.1.0"
+    rx-lite "^3.1.2"
+    strip-ansi "^3.0.0"
+    through "^2.3.6"
+
+inquirer@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.5.1.tgz#e9f2cd1ee172c7a32e054b78a03d4ddb0d7707f1"
+  dependencies:
+    async "~0.8.0"
+    chalk "~0.4.0"
+    cli-color "~0.3.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+insight@^0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/insight/-/insight-0.7.0.tgz#061f9189835bd38a97a60c2b76ea0c6b30099ff6"
+  dependencies:
+    async "^1.4.2"
+    chalk "^1.0.0"
+    configstore "^1.0.0"
+    inquirer "^0.10.0"
+    lodash.debounce "^3.0.1"
+    object-assign "^4.0.1"
+    os-name "^1.0.0"
+    request "^2.40.0"
+    tough-cookie "^2.0.0"
+
+intersect@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/intersect/-/intersect-0.0.3.tgz#c1a4a5e5eac6ede4af7504cc07e0ada7bc9f4920"
+
+invariant@^2.2.0, invariant@^2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360"
+  dependencies:
+    loose-envify "^1.0.0"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-git-url@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/is-git-url/-/is-git-url-0.2.3.tgz#445200d6fbd6da028fb5e01440d9afc93f3ccb64"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-npm@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-obj@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-redirect@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24"
+
+is-retry-allowed@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34"
+
+is-root@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-root/-/is-root-1.0.0.tgz#07b6c233bc394cd9d02ba15c966bd6660d6342d5"
+
+is-stream@^1.0.0, is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^2.0.3:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-2.0.4.tgz#d23592e6a6f093efb84c2e6152056be294e414a1"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.1, isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-base64@^2.1.8:
+  version "2.1.9"
+  resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-tokens@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7"
+
+js-yaml@^3.1.0, js-yaml@^3.2.5, js-yaml@^3.2.7:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+json5@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
+
+jsonfile@^2.0.0, jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+junk@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/junk/-/junk-1.0.3.tgz#87be63488649cbdca6f53ab39bec9ccd2347f592"
+
+kew@~0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/kew/-/kew-0.7.0.tgz#79d93d2d33363d6fdd2970b335d9141ad591d79b"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klassy@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/klassy/-/klassy-0.1.3.tgz#c31d5756d583197d75f582b6e692872be497067f"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+latest-version@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-2.0.0.tgz#56f8d6139620847b8017f8f1f4d78e211324168b"
+  dependencies:
+    package-json "^2.0.0"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.21:
+  version "0.0.21"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.21.tgz#09804bf70f8aefbba745f5d56d2a4debf22711ff"
+  dependencies:
+    debug "^2.1.0"
+    lodash.assign "^3.2.0"
+    request "^2.27.0"
+    rsvp "^3.0.21"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.2.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.2.4.tgz#0773526c317c8fd13bd534ee1d180ff88abf881a"
+  dependencies:
+    uc.micro "^1.0.1"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+load-json-file@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    parse-json "^2.2.0"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+    strip-bom "^2.0.0"
+
+loader.js@^4.0.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/loader.js/-/loader.js-4.3.0.tgz#736c13eb8afdf75abd6c2d7b4f7fd40e1105a71f"
+
+lockfile@^1.0.0, lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash-node@^2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-2.4.1.tgz#ea82f7b100c733d1a42af76801e506105e2a80ec"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._baseassign@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz#8c38a099500f215ad09e59f1722fd0c52bfe0a4e"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._baseflatten@^3.0.0:
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/lodash._baseflatten/-/lodash._baseflatten-3.1.4.tgz#0770ff80131af6e34f3b511796a7ba5214e65ff7"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-3.2.0.tgz#3ce9f0234b4b2223e296b8fa0ac1fee8ebca64fa"
+  dependencies:
+    lodash._baseassign "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash.assign@^4.2.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7"
+
+lodash.assignin@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2"
+
+lodash.clonedeep@^4.3.2, lodash.clonedeep@^4.4.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.debounce@^3.0.1, lodash.debounce@^3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-3.1.1.tgz#812211c378a94cc29d5aa4e3346cf0bfce3a7df5"
+  dependencies:
+    lodash._getnative "^3.0.0"
+
+lodash.find@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.find/-/lodash.find-4.6.0.tgz#cb0704d47ab71789ffa0de8b97dd926fb88b13b1"
+
+lodash.flatten@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-3.0.2.tgz#de1cf57758f8f4479319d35c3e9cc60c4501938c"
+  dependencies:
+    lodash._baseflatten "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2, lodash.merge@^3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.merge@^4.3.0, lodash.merge@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.0.tgz#69884ba144ac33fe699737a6086deffadd0f89c5"
+
+lodash.omit@^4.1.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.uniq@^4.2.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
+
+lodash.uniqby@^4.7.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz#d99c07a669e9e6d24e1362dfe266c67616af1302"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.10.0, lodash@^3.3.1, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.0, lodash@^4.14.0, lodash@^4.2.0, lodash@^4.5.1, lodash@^4.6.1:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+lodash@~4.16.4:
+  version "4.16.6"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.16.6.tgz#d22c9ac660288f3843e16ba7d2b5d06cca27d777"
+
+lolex@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/lolex/-/lolex-1.3.2.tgz#7c3da62ffcb30f0f5a80a2566ca24e45d8a01f31"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loose-envify@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848"
+  dependencies:
+    js-tokens "^3.0.0"
+
+loud-rejection@^1.0.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f"
+  dependencies:
+    currently-unhandled "^0.4.1"
+    signal-exit "^3.0.0"
+
+lowercase-keys@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306"
+
+lru-cache@2, lru-cache@^2.3.0, lru-cache@^2.5.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+lru-cache@~3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-3.2.0.tgz#71789b3b7f5399bec8565dda38aa30d2a097efee"
+  dependencies:
+    pseudomap "^1.0.1"
+
+lru-queue@0.1:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lru-queue/-/lru-queue-0.1.0.tgz#2738bd9f0d3cf4f84490c5736c48699ac632cda3"
+  dependencies:
+    es5-ext "~0.10.2"
+
+make-array@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/make-array/-/make-array-0.1.2.tgz#335e36ebb0c5a43154d21213a1ecaeae2a1bb3ef"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+map-obj@^1.0.0, map-obj@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
+
+markdown-it-terminal@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.3.tgz#c77a8533c2170b46d2a907a3c3452d4d7f4aa5db"
+  dependencies:
+    ansi-styles "^2.1.0"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash.merge "^3.3.2"
+    markdown-it "^4.4.0"
+
+markdown-it@4.3.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.3.0.tgz#0ee2b0724079d186b3f04b7345ce395ae47cc474"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+markdown-it@^4.4.0:
+  version "4.4.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.4.0.tgz#3df373dbea587a9a7fef3e56311b68908f75c414"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memoizee@~0.3.8:
+  version "0.3.10"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.3.10.tgz#4eca0d8aed39ec9d017f4c5c2f2f6432f42e5c8f"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.11"
+    es6-weak-map "~0.1.4"
+    event-emitter "~0.3.4"
+    lru-queue "0.1"
+    next-tick "~0.2.2"
+    timers-ext "0.1"
+
+memory-streams@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/memory-streams/-/memory-streams-0.1.2.tgz#273ff777ab60fec599b116355255282cca2c50c2"
+  dependencies:
+    readable-stream "~1.0.2"
+
+meow@^3.7.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb"
+  dependencies:
+    camelcase-keys "^2.0.0"
+    decamelize "^1.1.2"
+    loud-rejection "^1.0.0"
+    map-obj "^1.0.1"
+    minimist "^1.1.3"
+    normalize-package-data "^2.3.4"
+    object-assign "^4.0.1"
+    read-pkg-up "^1.0.1"
+    redent "^1.0.0"
+    trim-newlines "^1.0.0"
+
+merge-defaults@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/merge-defaults/-/merge-defaults-0.2.1.tgz#dd42248eb96bb6a51521724321c72ff9583dde80"
+  dependencies:
+    lodash "~2.4.1"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3, merge@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+"mime-db@>= 1.27.0 < 2", mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-db@~1.12.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.12.0.tgz#3d0c63180f458eb10d325aaa37d7c58ae312e9d7"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime-types@~2.0.1, mime-types@~2.0.3:
+  version "2.0.14"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.0.14.tgz#310e159db23e077f8bb22b748dabfa4957140aa6"
+  dependencies:
+    mime-db "~1.12.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.0, minimatch@~3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1, minimist@^1.1.3, minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mkpath@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/mkpath/-/mkpath-0.1.0.tgz#7554a6f8d871834cc97b5462b122c4c124d6de91"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+moment-timezone@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.3.1.tgz#3ef47856b02d53b718a10a5ec2023aa299e07bf5"
+  dependencies:
+    moment ">= 2.6.0"
+
+"moment@>= 2.6.0":
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@^0.11.0:
+  version "0.11.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99"
+
+mout@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^2.2.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-2.3.0.tgz#4028f7778b17708a489930a6e52ac3bca0da41d0"
+
+mute-stream@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+mute-stream@0.0.5, mute-stream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.5.tgz#8fbfabb0a98a253d3184331f9e8deb7372fac6c0"
+
+nan@^2.3.2:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+next-tick@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c"
+
+next-tick@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.2.2.tgz#75da4a927ee5887e39065880065b7336413b310d"
+
+node-gyp@^3.3.1:
+  version "3.6.0"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.6.0.tgz#7474f63a3a0501161dda0b6341f022f14c423fa6"
+  dependencies:
+    fstream "^1.0.0"
+    glob "^7.0.3"
+    graceful-fs "^4.1.2"
+    minimatch "^3.0.2"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2 || 3 || 4"
+    osenv "0"
+    request "2"
+    rimraf "2"
+    semver "~5.3.0"
+    tar "^2.0.0"
+    which "1"
+
+node-gyp@~3.3.0:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.3.1.tgz#80f7b6d7c2f9c0495ba42c518a670c99bdf6e4a0"
+  dependencies:
+    fstream "^1.0.0"
+    glob "3 || 4"
+    graceful-fs "^4.1.2"
+    minimatch "1"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2"
+    osenv "0"
+    path-array "^1.0.0"
+    request "2"
+    rimraf "2"
+    semver "2.x || 3.x || 4 || 5"
+    tar "^2.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-modules-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8"
+
+node-notifier@^5.0.1:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.1.2.tgz#2fa9e12605fa10009d44549d6fcd8a63dde0e4ff"
+  dependencies:
+    growly "^1.3.0"
+    semver "^5.3.0"
+    shellwords "^0.1.0"
+    which "^1.2.12"
+
+node-sass@^3.11.1, node-sass@^3.8.0:
+  version "3.13.1"
+  resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-3.13.1.tgz#7240fbbff2396304b4223527ed3020589c004fc2"
+  dependencies:
+    async-foreach "^0.1.3"
+    chalk "^1.1.1"
+    cross-spawn "^3.0.0"
+    gaze "^1.0.0"
+    get-stdin "^4.0.1"
+    glob "^7.0.3"
+    in-publish "^2.0.0"
+    lodash.assign "^4.2.0"
+    lodash.clonedeep "^4.3.2"
+    meow "^3.7.0"
+    mkdirp "^0.5.1"
+    nan "^2.3.2"
+    node-gyp "^3.3.1"
+    npmlog "^4.0.0"
+    request "^2.61.0"
+    sass-graph "^2.1.1"
+
+node-status-codes@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/node-status-codes/-/node-status-codes-1.0.0.tgz#5ae5541d024645d32a58fcddc9ceecea7ae3ac2f"
+
+node-uuid@^1.4.3, node-uuid@~1.4.0, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@^3.0.3, nopt@~3.0.6:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+nopt@~1.0.10:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, normalize-package-data@^2.3.2, normalize-package-data@^2.3.4, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-git-info@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/npm-git-info/-/npm-git-info-1.0.3.tgz#a933c42ec321e80d3646e0d6e844afe94630e1d5"
+
+npm-install-checks@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@^4.1.1, npm-package-arg@~4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.1.1.tgz#86d9dca985b4c5e5d59772dfd5de6919998a495a"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-registry-client@~7.0.9:
+  version "7.0.9"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.0.9.tgz#1baf86ee5285c4e6d38d4556208ded56049231bb"
+  dependencies:
+    chownr "^1.0.1"
+    concat-stream "^1.4.6"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.0"
+    request "^2.47.0"
+    retry "^0.8.0"
+    rimraf "2"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0"
+
+npm-user-validate@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@2.14.21:
+  version "2.14.21"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.14.21.tgz#4be88073d5eb95864fc84c1df2c743bfdeded70e"
+  dependencies:
+    abbrev "~1.0.7"
+    ansi "~0.3.1"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.8"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.2"
+    columnify "~1.5.4"
+    config-chain "~1.1.10"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.7"
+    fs-write-stream-atomic "~1.0.8"
+    fstream "~1.0.8"
+    fstream-npm "~1.0.7"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~5.0.15"
+    graceful-fs "~4.1.3"
+    hosted-git-info "~2.1.4"
+    inflight "~1.0.4"
+    inherits "~2.0.1"
+    ini "~1.3.4"
+    init-package-json "~1.9.3"
+    lockfile "~1.0.1"
+    lru-cache "~3.2.0"
+    minimatch "~3.0.0"
+    mkdirp "~0.5.1"
+    node-gyp "~3.3.0"
+    nopt "~3.0.6"
+    normalize-git-url "~3.0.1"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.7"
+    npm-package-arg "~4.1.0"
+    npm-registry-client "~7.0.9"
+    npm-user-validate "~0.1.2"
+    npmlog "~2.0.2"
+    once "~1.3.3"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.3"
+    readable-stream "~1.1.13"
+    realize-package-specifier "~3.0.1"
+    request "~2.69.0"
+    retry "~0.9.0"
+    rimraf "~2.5.2"
+    semver "~5.1.0"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~1.0.0"
+    spdx-license-ids "~1.2.0"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.4"
+    wrappy "~1.0.1"
+    write-file-atomic "~1.1.4"
+
+"npmlog@0 || 1 || 2", "npmlog@0.1 || 1 || 2", npmlog@~2.0.0, npmlog@~2.0.2:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.6.0.tgz#7dbeae44f6ca454e1f168451d630746735813ce3"
+
+oauth-sign@~0.8.0, oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0, once@^1.3.1, once@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+onetime@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/onetime/-/onetime-1.1.0.tgz#a1f7838f8314c516f05ecefcbc4ccfe04b4ed789"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+opn@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/opn/-/opn-1.0.2.tgz#b909643346d00a1abc977a8b96f3ce3c53d5cf5f"
+
+optimist@^0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-name@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/os-name/-/os-name-1.0.3.tgz#1b379f64835af7c5a7f498b357cb95215c159edf"
+  dependencies:
+    osx-release "^1.0.0"
+    win-release "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@^0.1.3, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+osx-release@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/osx-release/-/osx-release-1.1.0.tgz#f217911a28136949af1bf9308b241e2737d3cd6c"
+  dependencies:
+    minimist "^1.1.0"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+p-throttler@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/p-throttler/-/p-throttler-0.1.1.tgz#15246409d225d3eefca85c50de710a83a78cca6a"
+  dependencies:
+    q "~0.9.2"
+
+package-json@^2.0.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/package-json/-/package-json-2.4.0.tgz#0d15bd67d1cbbddbb2ca222ff2edb86bcb31a8bb"
+  dependencies:
+    got "^5.0.0"
+    registry-auth-token "^3.0.1"
+    registry-url "^3.0.3"
+    semver "^5.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.1.0, parse-json@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-array@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-array/-/path-array-1.0.1.tgz#7e2f0f35f07a2015122b868b7eac0eb2c4fec271"
+  dependencies:
+    array-index "^1.0.0"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-exists@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
+  dependencies:
+    pinkie-promise "^2.0.0"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+path-type@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
+  dependencies:
+    graceful-fs "^4.1.2"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+pend@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+phantomjs@^1.9.2:
+  version "1.9.20"
+  resolved "https://registry.yarnpkg.com/phantomjs/-/phantomjs-1.9.20.tgz#4424aca20e14d255c0b0889af6f6b8973da10e0d"
+  dependencies:
+    extract-zip "~1.5.0"
+    fs-extra "~0.26.4"
+    hasha "^2.2.0"
+    kew "~0.7.0"
+    progress "~1.1.8"
+    request "~2.67.0"
+    request-progress "~2.0.1"
+    which "~1.2.2"
+
+pify@^2.0.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+pleasant-progress@^1.0.2:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/pleasant-progress/-/pleasant-progress-1.1.0.tgz#c99cd730a2e50cffdd3badff845fc4d5282e266b"
+
+portfinder@^1.0.2:
+  version "1.0.13"
+  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.13.tgz#bb32ecd87c27104ae6ee44b5a3ccbf0ebb1aede9"
+  dependencies:
+    async "^1.5.2"
+    debug "^2.2.0"
+    mkdirp "0.5.x"
+
+prepend-http@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+printf@^0.2.3:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/printf/-/printf-0.2.5.tgz#c438ca2ca33e3927671db4ab69c0e52f936a4f0f"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+process-relative-require@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/process-relative-require/-/process-relative-require-1.0.0.tgz#1590dfcf5b8f2983ba53e398446b68240b4cc68a"
+  dependencies:
+    node-modules-path "^1.0.0"
+
+progress@~1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/progress/-/progress-1.1.8.tgz#e260c78f6161cdd9b0e56cc3e0a85de17c7a57be"
+
+promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promptly@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/promptly/-/promptly-0.2.0.tgz#73ef200fa8329d5d3a8df41798950b8646ca46d9"
+  dependencies:
+    read "~1.0.4"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+pump@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.2.tgz#3b3ee6512f94f0e575538c17995f9f16990a5d51"
+  dependencies:
+    end-of-stream "^1.1.0"
+    once "^1.3.1"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+q@~0.9.2:
+  version "0.9.7"
+  resolved "https://registry.yarnpkg.com/q/-/q-0.9.7.tgz#4de2e6cb3b29088c9e4cbc03bf9d42fb96ce2f75"
+
+qs@5.2.0:
+  version "5.2.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.0.tgz#a9f31142af468cb72b25b30136ba2456834916be"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~2.3.1:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404"
+
+qs@~5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.1.0.tgz#4d932e5c7ea411cca76a312d39a606200fd50cd9"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+qs@~6.0.2:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.0.4.tgz#51019d84720c939b82737e84556a782338ecea7b"
+
+quick-temp@0.1.5, quick-temp@^0.1.0, quick-temp@^0.1.2, quick-temp@^0.1.3, quick-temp@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.5.tgz#0d0d67f0fb6a589a0e142f90985f76cdbaf403f7"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+qunitjs@^1.20.0:
+  version "1.23.1"
+  resolved "https://registry.yarnpkg.com/qunitjs/-/qunitjs-1.23.1.tgz#1971cf97ac9be01a64d2315508d2e48e6fd4e719"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.1.5:
+  version "2.1.7"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.1.7.tgz#adfeace2e4fb3098058014d08c072dcc59758774"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.13"
+    unpipe "1.0.0"
+
+rc@^1.0.1, rc@^1.1.6:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-all-stream@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/read-all-stream/-/read-all-stream-3.1.0.tgz#35c3e177f2078ef789ee4bfafa4373074eaef4fa"
+  dependencies:
+    pinkie-promise "^2.0.0"
+    readable-stream "^2.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read-pkg-up@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02"
+  dependencies:
+    find-up "^1.0.0"
+    read-pkg "^1.0.0"
+
+read-pkg@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28"
+  dependencies:
+    load-json-file "^1.0.0"
+    normalize-package-data "^2.3.2"
+    path-type "^1.0.0"
+
+read@1, read@~1.0.1, read@~1.0.4, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+"readable-stream@1 || 2", readable-stream@^2, readable-stream@^2.0.0, readable-stream@^2.0.2, readable-stream@^2.0.5, readable-stream@^2.0.6, readable-stream@~2.0.0, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@1.1, readable-stream@^1.1.8, readable-stream@~1.1.13:
+  version "1.1.14"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~1.0.2, readable-stream@~1.0.26:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@0.1.1, readline2@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+readline2@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-1.0.1.tgz#41059608ffc154757b715d9989d199ffbf372e35"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    mute-stream "0.0.5"
+
+realize-package-specifier@~3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17, recast@^0.11.3:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redent@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde"
+  dependencies:
+    indent-string "^2.1.0"
+    strip-indent "^1.0.1"
+
+redeyed@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.4.4.tgz#37e990a6f2b21b2a11c2e6a48fd4135698cba97f"
+  dependencies:
+    esprima "~1.0.4"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator-runtime@^0.10.0:
+  version "0.10.5"
+  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658"
+
+regenerator-transform@0.9.11:
+  version "0.9.11"
+  resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.11.tgz#3a7d067520cb7b7176769eb5ff868691befe1283"
+  dependencies:
+    babel-runtime "^6.18.0"
+    babel-types "^6.19.0"
+    private "^0.1.6"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu-core@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240"
+  dependencies:
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+registry-auth-token@^3.0.1:
+  version "3.3.0"
+  resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-3.3.0.tgz#57ae67347e73d96345ed1bc01294c7237c02aa63"
+  dependencies:
+    rc "^1.1.6"
+    safe-buffer "^5.0.1"
+
+registry-url@^3.0.3:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942"
+  dependencies:
+    rc "^1.0.1"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request-progress@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-0.3.1.tgz#0721c105d8a96ac6b2ce8b2c89ae2d5ecfcf6b3a"
+  dependencies:
+    throttleit "~0.0.2"
+
+request-progress@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-2.0.1.tgz#5d36bb57961c673aa5b788dbc8141fdf23b44e08"
+  dependencies:
+    throttleit "^1.0.0"
+
+request-replay@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/request-replay/-/request-replay-0.2.0.tgz#9b693a5d118b39f5c596ead5ed91a26444057f60"
+  dependencies:
+    retry "~0.6.0"
+
+request@2, request@^2.27.0, request@^2.40.0, request@^2.47.0, request@^2.51.0, request@^2.61.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@2.53.0:
+  version "2.53.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.53.0.tgz#180a3ae92b7b639802e4f9545dd8fcdeb71d760c"
+  dependencies:
+    aws-sign2 "~0.5.0"
+    bl "~0.9.0"
+    caseless "~0.9.0"
+    combined-stream "~0.0.5"
+    forever-agent "~0.5.0"
+    form-data "~0.2.0"
+    hawk "~2.3.0"
+    http-signature "~0.10.0"
+    isstream "~0.1.1"
+    json-stringify-safe "~5.0.0"
+    mime-types "~2.0.1"
+    node-uuid "~1.4.0"
+    oauth-sign "~0.6.0"
+    qs "~2.3.1"
+    stringstream "~0.0.4"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@~2.67.0:
+  version "2.67.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.67.0.tgz#8af74780e2bf11ea0ae9aa965c11f11afd272742"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+request@~2.69.0:
+  version "2.69.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.69.0.tgz#cf91d2e000752b1217155c005241911991a2346a"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.6"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~6.0.2"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+require-directory@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
+
+require-main-filename@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6, resolve@^1.1.7:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+restore-cursor@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-1.0.1.tgz#34661f46886327fed2991479152252df92daa541"
+  dependencies:
+    exit-hook "^1.0.0"
+    onetime "^1.0.0"
+
+retry@0.6.1, retry@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.6.1.tgz#fdc90eed943fde11b893554b8cc63d0e899ba918"
+
+retry@^0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f"
+
+retry@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.9.0.tgz#6f697e50a0e4ddc8c8f7fb547a9b60dead43678d"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.0, rimraf@^2.2.8, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.4.4, rimraf@^2.5.2, rimraf@^2.5.3, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rimraf@~2.4.3:
+  version "2.4.5"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da"
+  dependencies:
+    glob "^6.0.1"
+
+rimraf@~2.5.2:
+  version "2.5.4"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04"
+  dependencies:
+    glob "^7.0.5"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.0.6:
+  version "3.0.21"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.0.21.tgz#49c588fe18ef293bcd0ab9f4e6756e6ac433359f"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+run-async@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/run-async/-/run-async-0.1.0.tgz#c8ad4a5e110661e402a7d21b530e009f25f8e389"
+  dependencies:
+    once "^1.3.0"
+
+rx-lite@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-3.1.2.tgz#19ce502ca572665f3b647b10939f97fd1615f102"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+samsam@1.1.2, samsam@~1.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/samsam/-/samsam-1.1.2.tgz#bec11fdc83a9fda063401210e40176c3024d1567"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+sass-graph@^2.1.1:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.2.tgz#f4d6c95b546ea2a09d14176d0fc1a07ee2b48354"
+  dependencies:
+    glob "^7.0.0"
+    lodash "^4.0.0"
+    scss-tokenizer "^0.2.1"
+    yargs "^6.6.0"
+
+scss-tokenizer@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/scss-tokenizer/-/scss-tokenizer-0.2.1.tgz#07c0cc577bb7ab4d08fd900185adbf4bc844141d"
+  dependencies:
+    js-base64 "^2.1.8"
+    source-map "^0.4.2"
+
+semver-diff@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36"
+  dependencies:
+    semver "^5.0.3"
+
+semver-utils@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/semver-utils/-/semver-utils-1.1.1.tgz#27d92fec34d27cfa42707d3b40d025ae9855f2df"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.0.1, semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@~5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-2.3.2.tgz#b9848f25d6cf36333073ec9ef8856d42f1233e52"
+
+semver@^4.1.0, semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.1.0:
+  version "5.1.1"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.1.1.tgz#a3292a373e6f3e0798da0b20641b9a9c5bc47e19"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@^2.0.0, set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-command@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
+  dependencies:
+    shebang-regex "^1.0.0"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shell-quote@^1.4.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767"
+  dependencies:
+    array-filter "~0.0.0"
+    array-map "~0.0.0"
+    array-reduce "~0.0.0"
+    jsonify "~0.0.0"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+shellwords@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.0.tgz#66afd47b6a12932d9071cbfd98a52e785cd0ba14"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+silent-error@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.0.1.tgz#71b7d503d1c6f94882b51b56be879b113cb4822c"
+  dependencies:
+    debug "^2.2.0"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+sinon@^1.17.3:
+  version "1.17.7"
+  resolved "https://registry.yarnpkg.com/sinon/-/sinon-1.17.7.tgz#4542a4f49ba0c45c05eb2e9dd9d203e2b8efe0bf"
+  dependencies:
+    formatio "1.1.1"
+    lolex "1.3.2"
+    samsam "1.1.2"
+    util ">=0.10.3 <1"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.6.0.tgz#5b668f4f771304dfeed179064708386fa6717853"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.0"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.6.0.tgz#3e40d932637e6bd923981b25caf7c53e83b6e2e1"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.0"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.6.0"
+    socket.io-parser "2.3.1"
+
+sorted-object@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-1.0.0.tgz#5d1f4f9c1fb2cd48965967304e212eb44cfb6d05"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-support@^0.4.2:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.15.tgz#03202df65c06d2bd8c7ec2362a193056fef8d3b1"
+  dependencies:
+    source-map "^0.5.6"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.4.x, source-map@^0.4.2, source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+spawn-args@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/spawn-args/-/spawn-args-0.2.0.tgz#fb7d0bd1d70fd4316bd9e3dec389e65f9d6361bb"
+
+spawnback@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/spawnback/-/spawnback-1.0.0.tgz#f73662f7e54d95367eca74d6426c677dd7ea686f"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.2, spdx-license-ids@~1.2.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sri-toolbox@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+statuses@1, "statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1, string-width@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+stringify-object@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-1.0.1.tgz#86d35e7dbfbce9aa45637d7ecdd7847e159db8a2"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-indent@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2"
+  dependencies:
+    get-stdin "^4.0.1"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+sum-up@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sum-up/-/sum-up-1.0.3.tgz#1c661f667057f63bcb7875aa1438bc162525156e"
+  dependencies:
+    chalk "^1.0.0"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+tap-parser@^5.1.0:
+  version "5.3.3"
+  resolved "https://registry.yarnpkg.com/tap-parser/-/tap-parser-5.3.3.tgz#53ec8a90f275d6fff43f169e56a679502a741185"
+  dependencies:
+    events-to-array "^1.0.1"
+    js-yaml "^3.2.7"
+  optionalDependencies:
+    readable-stream "^2"
+
+tar-fs@^1.4.1:
+  version "1.15.2"
+  resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-1.15.2.tgz#761f5b32932c7b39461a60d537faea0d8084830c"
+  dependencies:
+    chownr "^1.0.1"
+    mkdirp "^0.5.1"
+    pump "^1.0.0"
+    tar-stream "^1.1.2"
+
+tar-stream@^1.1.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.5.2.tgz#fbc6c6e83c1a19d4cb48c7d96171fc248effc7bf"
+  dependencies:
+    bl "^1.0.0"
+    end-of-stream "^1.0.0"
+    readable-stream "^2.0.0"
+    xtend "^4.0.0"
+
+tar@^2.0.0, tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.3:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.3.tgz#e0c6bc4d26b903124410e4fed81103014dfc1f59"
+  dependencies:
+    os-tmpdir "^1.0.0"
+    rimraf "~2.2.6"
+
+testem@^1.3.0:
+  version "1.16.0"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-1.16.0.tgz#3933040b5d5b5fbdb6a2b1e7032e511b54a05867"
+  dependencies:
+    backbone "^1.1.2"
+    bluebird "^3.4.6"
+    charm "^1.0.0"
+    commander "^2.6.0"
+    consolidate "^0.14.0"
+    cross-spawn "^5.1.0"
+    express "^4.10.7"
+    fireworm "^0.7.0"
+    glob "^7.0.4"
+    http-proxy "^1.13.1"
+    js-yaml "^3.2.5"
+    lodash.assignin "^4.1.0"
+    lodash.clonedeep "^4.4.1"
+    lodash.find "^4.5.1"
+    lodash.uniqby "^4.7.0"
+    mkdirp "^0.5.1"
+    mustache "^2.2.1"
+    node-notifier "^5.0.1"
+    npmlog "^4.0.0"
+    printf "^0.2.3"
+    rimraf "^2.4.4"
+    socket.io "1.6.0"
+    spawn-args "^0.2.0"
+    styled_string "0.0.1"
+    tap-parser "^5.1.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+throttleit@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-1.0.0.tgz#9e785836daf46743145a5984b6268d828528ac6c"
+
+throttleit@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-0.0.2.tgz#cfedf88e60c00dd9697b61fdd2a8343a9b680eaf"
+
+through@^2.3.6, through@~2.3.4, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+timed-out@^3.0.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-3.1.3.tgz#95860bfcc5c76c277f8f8326fd0f5b2e20eba217"
+
+timers-ext@0.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.2.tgz#61cc47a76c1abd3195f14527f978d58ae94c5204"
+  dependencies:
+    es5-ext "~0.10.14"
+    next-tick "1"
+
+tiny-lr@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.2.1.tgz#b3fdba802e5d56a33c2f6f10794b32e477ac729d"
+  dependencies:
+    body-parser "~1.14.0"
+    debug "~2.2.0"
+    faye-websocket "~0.10.0"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~5.1.0"
+
+tmp@0.0.24:
+  version "0.0.24"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.24.tgz#d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0, to-fast-properties@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+touch@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/touch/-/touch-0.0.3.tgz#51aef3d449571d4f287a5d87c9c8b49181a0db1d"
+  dependencies:
+    nopt "~1.0.10"
+
+tough-cookie@>=0.12.0, tough-cookie@^2.0.0, tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+"traverse@>=0.3.0 <0.4":
+  version "0.3.9"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.3.9.tgz#717b8f220cc0bb7b44e40514c22b2e8bbc70d8b9"
+
+tree-sync@^1.1.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/tree-sync/-/tree-sync-1.2.2.tgz#2cf76b8589f59ffedb58db5a3ac7cb013d0158b7"
+  dependencies:
+    debug "^2.2.0"
+    fs-tree-diff "^0.5.6"
+    mkdirp "^0.5.1"
+    quick-temp "^0.1.5"
+    walk-sync "^0.2.7"
+
+trim-newlines@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613"
+
+trim-right@^1.0.0, trim-right@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.0, tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.6.10, type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@~0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0, uc.micro@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.6, uglify-js@^2.7.0:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+untildify@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0"
+  dependencies:
+    os-homedir "^1.0.0"
+
+unzip-response@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-1.0.2.tgz#b984f0877fc0a89c2c773cc1ef7b5b232b5b06fe"
+
+update-notifier@^0.6.0:
+  version "0.6.3"
+  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-0.6.3.tgz#776dec8daa13e962a341e8a1d98354306b67ae08"
+  dependencies:
+    boxen "^0.3.1"
+    chalk "^1.0.0"
+    configstore "^2.0.0"
+    is-npm "^1.0.0"
+    latest-version "^2.0.0"
+    semver-diff "^2.0.0"
+
+url-parse-lax@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73"
+  dependencies:
+    prepend-http "^1.0.1"
+
+user-home@^1.0.0, user-home@^1.1.0, user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+"util@>=0.10.3 <1":
+  version "0.10.3"
+  resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9"
+  dependencies:
+    inherits "2.0.1"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.5, walk-sync@^0.2.6, walk-sync@^0.2.7:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.5.1:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f"
+
+which@1, which@^1.0.8, which@^1.2.12, which@^1.2.9, which@~1.2.2, which@~1.2.4:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+widest-line@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-1.0.0.tgz#0c09c85c2a94683d0d7eaf8ee097d564bf0e105c"
+  dependencies:
+    string-width "^1.0.1"
+
+win-release@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/win-release/-/win-release-1.1.1.tgz#5fa55e02be7ca934edfc12665632e849b72e5209"
+  dependencies:
+    semver "^5.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrap-ansi@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+
+wrappy@1, wrappy@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@^1.1.2:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.3.4.tgz#f807a4f0b1d9e913ae7a48112e6cc3af1991b45f"
+  dependencies:
+    graceful-fs "^4.1.11"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+write-file-atomic@~1.1.4:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-1.0.1.tgz#14ff8f63a4fdbcb05d5b6eea22b36f3033b9f04e"
+  dependencies:
+    user-home "^1.0.0"
+
+xdg-basedir@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-2.0.0.tgz#edbc903cc385fc04523d966a335504b5504d1bd2"
+  dependencies:
+    os-homedir "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0, y18n@^3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.18.tgz#e5cab771f0fc80ca599814cb9c269cb8bff00e2c"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.16.3"
+    lodash.merge "^3.0.2"
+
+yargs-parser@^4.2.0:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-4.2.1.tgz#29cceac0dc4f03c6c87b4a9f217dd18c9f74871c"
+  dependencies:
+    camelcase "^3.0.0"
+
+yargs@^6.6.0:
+  version "6.6.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-6.6.0.tgz#782ec21ef403345f830a808ca3d513af56065208"
+  dependencies:
+    camelcase "^3.0.0"
+    cliui "^3.2.0"
+    decamelize "^1.1.1"
+    get-caller-file "^1.0.1"
+    os-locale "^1.4.0"
+    read-pkg-up "^1.0.1"
+    require-directory "^2.1.1"
+    require-main-filename "^1.0.1"
+    set-blocking "^2.0.0"
+    string-width "^1.0.2"
+    which-module "^1.0.0"
+    y18n "^3.2.1"
+    yargs-parser "^4.2.0"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yauzl@2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.4.1.tgz#9528f442dab1b2284e58b4379bb194e22e0c4005"
+  dependencies:
+    fd-slicer "~1.0.1"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/hive-next/pom.xml b/contrib/views/hive-next/pom.xml
index c0eb774..578a82c 100644
--- a/contrib/views/hive-next/pom.xml
+++ b/contrib/views/hive-next/pom.xml
@@ -242,29 +242,34 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>src/main/resources/ui/hive-web/</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install --pure-lockfile</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
-              <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm</arguments>
+              <arguments>install --ignore-engines --pure-lockfile</arguments>
             </configuration>
           </execution>
         </executions>
diff --git a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/HiveActor.java b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/HiveActor.java
index 36bc430..a71ca5c 100644
--- a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/HiveActor.java
+++ b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/HiveActor.java
@@ -25,26 +25,22 @@
 
 public abstract class HiveActor extends UntypedActor {
 
-    private final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final Logger LOG = LoggerFactory.getLogger(HiveActor.class);
 
-    @Override
-    final public void onReceive(Object message) throws Exception {
-        HiveMessage hiveMessage = new HiveMessage(message);
-        if(LOG.isDebugEnabled()){
-            LOG.debug("Received message: " + message.getClass().getName() + ", generated id: " + hiveMessage.getId() +
-                    " sent by: " + sender() + ", recieved by" + self());
-        }
-
-        handleMessage(hiveMessage);
-
-        if(LOG.isDebugEnabled()){
-            LOG.debug("Message submitted: " + hiveMessage.getId());
-
-        }
+  @Override
+  final public void onReceive(Object message) throws Exception {
+    HiveMessage hiveMessage = new HiveMessage(message);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received message: " + message.getClass().getName() + ", generated id: " + hiveMessage.getId() +
+          " sent by: " + sender() + ", recieved by" + self());
     }
 
-    public abstract void handleMessage(HiveMessage hiveMessage);
+    handleMessage(hiveMessage);
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Message submitted: " + hiveMessage.getId());
+    }
+  }
 
-
+  public abstract void handleMessage(HiveMessage hiveMessage);
 }
diff --git a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/JdbcConnector.java b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/JdbcConnector.java
index 997c28a..f7746d9 100644
--- a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/JdbcConnector.java
+++ b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/JdbcConnector.java
@@ -77,7 +77,7 @@
  */
 public class JdbcConnector extends HiveActor {
 
-  private final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final Logger LOG = LoggerFactory.getLogger(JdbcConnector.class);
 
   public static final String SUFFIX = "validating the login";
 
@@ -527,12 +527,6 @@
   }
 
   private void checkTerminationInactivity() {
-    if (!isAsync()) {
-      // Should not terminate if job is sync. Will terminate after the job is finished.
-      stopTerminateInactivityScheduler();
-      return;
-    }
-
     LOG.debug("Termination check, executing status: {}", executing);
     if (executing) {
       keepAlive();
diff --git a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/OperationController.java b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/OperationController.java
index 98e60f2..c8c8a6c 100644
--- a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/OperationController.java
+++ b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/actor/OperationController.java
@@ -63,7 +63,7 @@
  */
 public class OperationController extends HiveActor {
 
-  private final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final Logger LOG = LoggerFactory.getLogger(OperationController.class);
 
   private final ActorSystem system;
   private final ActorRef deathWatch;
diff --git a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/files/FileService.java b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/files/FileService.java
index 64880bb..88f8df1 100644
--- a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/files/FileService.java
+++ b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/files/FileService.java
@@ -185,9 +185,9 @@
     try {
       filePath = sanitizeFilePath(filePath);
       LOG.debug("Rewriting file " + filePath);
-      FSDataOutputStream output = getSharedObjectsFactory().getHdfsApi().create(filePath, true);
-      output.writeBytes(request.file.getFileContent());
-      output.close();
+      HdfsApi hdfsApi = getSharedObjectsFactory().getHdfsApi();
+      HdfsUtil.putStringToFile(hdfsApi, filePath,
+          request.file.getFileContent());
       return Response.status(204).build();
     } catch (WebApplicationException ex) {
       throw ex;
@@ -209,7 +209,7 @@
       try {
         FSDataOutputStream output = getSharedObjectsFactory().getHdfsApi().create(request.file.getFilePath(), false);
         if (request.file.getFileContent() != null) {
-          output.writeBytes(request.file.getFileContent());
+          output.write(request.file.getFileContent().getBytes());
         }
         output.close();
       } catch (FileAlreadyExistsException ex) {
diff --git a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/jobs/JobService.java b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/jobs/JobService.java
index 5fa96bb..80f4e24 100644
--- a/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/jobs/JobService.java
+++ b/contrib/views/hive-next/src/main/java/org/apache/ambari/view/hive2/resources/jobs/JobService.java
@@ -176,11 +176,11 @@
    * Get job results in csv format
    */
   @GET
-  @Path("{jobId}/results/csv")
+  @Path("{jobId}/results/csv/{fileName}")
   @Produces("text/csv")
   public Response getResultsCSV(@PathParam("jobId") String jobId,
                                 @Context HttpServletResponse response,
-                                @QueryParam("fileName") String fileName,
+                                @PathParam("fileName") String fileName,
                                 @QueryParam("columns") final String requestedColumns) {
     try {
 
@@ -223,12 +223,7 @@
         }
       };
 
-      if (fileName == null || fileName.isEmpty()) {
-        fileName = "results.csv";
-      }
-
       return Response.ok(stream).
-          header("Content-Disposition", String.format("attachment; filename=\"%s\"", fileName)).
           build();
 
 
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
index 318d1f8..791c88d 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/Brocfile.js
@@ -50,5 +50,6 @@
 app.import('vendor/codemirror/codemirror.css');
 app.import('vendor/codemirror/show-hint.css');
 app.import('vendor/dagre.min.js');
+app.import('vendor/browser-pollyfills.js');
 
 module.exports = app.toTree();
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/controllers/index.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/controllers/index.js
index 8250dbb..5644feb 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/controllers/index.js
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/controllers/index.js
@@ -586,7 +586,7 @@
 
       defer.promise.then(function (text) {
         // download file ...
-        var urlString = "%@/?fileName=%@.csv";
+        var urlString = "%@/%@.csv";
         var url = self.get('csvUrl');
         url = urlString.fmt(url, text);
         window.open(url);
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
index 087bab3..34379d2 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/app/routes/splash.js
@@ -98,7 +98,7 @@
     }
 
     this.fetchServiceCheckPolicy()
-      .then((data) => {
+      .then(function(data) {
         var numberOfChecks = 0;
         var serviceCheckPolicy = data.serviceCheckPolicy;
         for (var serviceCheck in serviceCheckPolicy) {
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/bower.json b/contrib/views/hive-next/src/main/resources/ui/hive-web/bower.json
index d029eff..62861b2 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/bower.json
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/bower.json
@@ -20,7 +20,8 @@
     "pretender": "0.1.0",
     "ember-uploader": "0.3.9",
     "polestar": "https://github.com/hortonworks/polestar.git#0.7.2",
-    "voyager": "https://github.com/hortonworks/voyager.git#0.7.2"
+    "voyager": "https://github.com/hortonworks/voyager.git#0.7.2",
+    "font-awesome": "^4.7.0"
   },
   "resolutions": {
     "ember": "1.10.0"
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/package.json b/contrib/views/hive-next/src/main/resources/ui/hive-web/package.json
index 623f14f..545cefd 100644
--- a/contrib/views/hive-next/src/main/resources/ui/hive-web/package.json
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/package.json
@@ -10,7 +10,7 @@
     "start": "ember server",
     "build": "ember build",
     "test": "ember test",
-    "preinstall": "chmod +x node/node_modules/npm/bin/node-gyp-bin/node-gyp",
+    "preinstall": "",
     "postinstall": "node node_modules/.bin/bower --allow-root install"
   },
   "repository": "https://github.com/stefanpenner/ember-cli",
@@ -23,17 +23,18 @@
     "body-parser": "^1.2.0",
     "bower": ">= 1.3.12",
     "broccoli-asset-rev": "^2.0.0",
+    "ember-ajax": "^2.0.1",
     "broccoli-sass": "^0.6.3",
     "ember-cli": "0.2.2",
     "ember-cli-autoprefixer": "0.4.1",
     "ember-cli-blanket": "^0.5.0",
+    "ember-cli-moment-shim": "3.0.1",
     "ember-cli-content-security-policy": "0.3.0",
-    "ember-cli-font-awesome": "0.0.4",
+    "ember-font-awesome": "2.2.0",
     "ember-cli-htmlbars": "0.7.4",
-    "ember-cli-ic-ajax": "0.1.1",
     "ember-cli-inject-live-reload": "^1.3.0",
     "ember-cli-jquery-ui": "0.0.12",
-    "ember-cli-moment": "0.0.1",
+    "ember-moment": "7.2.0",
     "ember-cli-pretender": "^0.3.1",
     "ember-cli-qunit": "0.3.14",
     "ember-cli-selectize": "0.0.19",
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js b/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js
new file mode 100644
index 0000000..88a59c1
--- /dev/null
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/vendor/browser-pollyfills.js
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
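+// Polyfill for String.prototype.startsWith (ES2015): compares the substring of this string at the given position against searchString.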
+if (!String.prototype.startsWith) {
+  String.prototype.startsWith = function (searchString, position) {
+    position = position || 0;
+    return this.substr(position, searchString.length) === searchString;
+  };
+}
+
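+// Polyfill for String.prototype.endsWith (ES2015): a missing or out-of-range position is clamped to the string length before comparing.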
+if (!String.prototype.endsWith) {
+  String.prototype.endsWith = function (searchString, position) {
+    var subjectString = this.toString();
+    if (typeof position !== 'number' || !isFinite(position) || Math.floor(position) !== position || position > subjectString.length) {
+      position = subjectString.length;
+    }
+    position -= searchString.length;
+    var lastIndex = subjectString.lastIndexOf(searchString, position);
+    return lastIndex !== -1 && lastIndex === position;
+  };
+}
+
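+// Polyfill for Object.assign (ES2015): copies own enumerable properties from each source object onto the target, skipping null and undefined sources.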
+if (typeof Object.assign != 'function') {
+  Object.assign = function (target, varArgs) { // .length of function is 2
+    'use strict';
+    if (target == null) { // TypeError if undefined or null
+      throw new TypeError('Cannot convert undefined or null to object');
+    }
+
+    var to = Object(target);
+
+    for (var index = 1; index < arguments.length; index++) {
+      var nextSource = arguments[index];
+
+      if (nextSource != null) { // Skip over if undefined or null
+        for (var nextKey in nextSource) {
+          // Avoid bugs when hasOwnProperty is shadowed
+          if (Object.prototype.hasOwnProperty.call(nextSource, nextKey)) {
+            to[nextKey] = nextSource[nextKey];
+          }
+        }
+      }
+    }
+    return to;
+  };
+}
+
+
+if (!Array.from) {
+  Array.from = (function () {
+    var toStr = Object.prototype.toString;
+    var isCallable = function (fn) {
+      return typeof fn === 'function' || toStr.call(fn) === '[object Function]';
+    };
+    var toInteger = function (value) {
+      var number = Number(value);
+      if (isNaN(number)) {
+        return 0;
+      }
+      if (number === 0 || !isFinite(number)) {
+        return number;
+      }
+      return (number > 0 ? 1 : -1) * Math.floor(Math.abs(number));
+    };
+    var maxSafeInteger = Math.pow(2, 53) - 1;
+    var toLength = function (value) {
+      var len = toInteger(value);
+      return Math.min(Math.max(len, 0), maxSafeInteger);
+    };
+
+    // The length property of the from method is 1.
+    return function from(arrayLike/*, mapFn, thisArg */) {
+      // 1. Let C be the this value.
+      var C = this;
+
+      // 2. Let items be ToObject(arrayLike).
+      var items = Object(arrayLike);
+
+      // 3. ReturnIfAbrupt(items).
+      if (arrayLike == null) {
+        throw new TypeError('Array.from requires an array-like object - not null or undefined');
+      }
+
+      // 4. If mapfn is undefined, then let mapping be false.
+      var mapFn = arguments.length > 1 ? arguments[1] : void undefined;
+      var T;
+      if (typeof mapFn !== 'undefined') {
+        // 5. else
+        // 5. a If IsCallable(mapfn) is false, throw a TypeError exception.
+        if (!isCallable(mapFn)) {
+          throw new TypeError('Array.from: when provided, the second argument must be a function');
+        }
+
+        // 5. b. If thisArg was supplied, let T be thisArg; else let T be undefined.
+        if (arguments.length > 2) {
+          T = arguments[2];
+        }
+      }
+
+      // 10. Let lenValue be Get(items, "length").
+      // 11. Let len be ToLength(lenValue).
+      var len = toLength(items.length);
+
+      // 13. If IsConstructor(C) is true, then
+      // 13. a. Let A be the result of calling the [[Construct]] internal method
+      // of C with an argument list containing the single item len.
+      // 14. a. Else, Let A be ArrayCreate(len).
+      var A = isCallable(C) ? Object(new C(len)) : new Array(len);
+
+      // 16. Let k be 0.
+      var k = 0;
+      // 17. Repeat, while k < len… (also steps a - h)
+      var kValue;
+      while (k < len) {
+        kValue = items[k];
+        if (mapFn) {
+          A[k] = typeof T === 'undefined' ? mapFn(kValue, k) : mapFn.call(T, kValue, k);
+        } else {
+          A[k] = kValue;
+        }
+        k += 1;
+      }
+      // 18. Let putStatus be Put(A, "length", len, true).
+      A.length = len;
+      // 20. Return A.
+      return A;
+    };
+  }());
+}
+
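+// Polyfill for Number.isNaN (ES2015): unlike the global isNaN, it does not coerce its argument, so only the actual NaN value returns true.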
+Number.isNaN = Number.isNaN || function (value) {
+  return typeof value === 'number' && isNaN(value);
+};
+
+
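+// Polyfill for String.fromCodePoint (ES2015): builds a string from code points, encoding astral code points (above 0xFFFF) as UTF-16 surrogate pairs.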
+if (!String.fromCodePoint) {
+  (function () {
+    var defineProperty = (function () {
+      // IE 8 only supports `Object.defineProperty` on DOM elements
+      try {
+        var object = {};
+        var $defineProperty = Object.defineProperty;
+        var result = $defineProperty(object, object, object) && $defineProperty;
+      } catch (error) {
+      }
+      return result;
+    }());
+    var stringFromCharCode = String.fromCharCode;
+    var floor = Math.floor;
+    var fromCodePoint = function () {
+      var MAX_SIZE = 0x4000;
+      var codeUnits = [];
+      var highSurrogate;
+      var lowSurrogate;
+      var index = -1;
+      var length = arguments.length;
+      if (!length) {
+        return '';
+      }
+      var result = '';
+      while (++index < length) {
+        var codePoint = Number(arguments[index]);
+        if (
+          !isFinite(codePoint) ||       // `NaN`, `+Infinity`, or `-Infinity`
+          codePoint < 0 ||              // not a valid Unicode code point
+          codePoint > 0x10FFFF ||       // not a valid Unicode code point
+          floor(codePoint) != codePoint // not an integer
+        ) {
+          throw RangeError('Invalid code point: ' + codePoint);
+        }
+        if (codePoint <= 0xFFFF) { // BMP code point
+          codeUnits.push(codePoint);
+        } else { // Astral code point; split in surrogate halves
+          // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
+          codePoint -= 0x10000;
+          highSurrogate = (codePoint >> 10) + 0xD800;
+          lowSurrogate = (codePoint % 0x400) + 0xDC00;
+          codeUnits.push(highSurrogate, lowSurrogate);
+        }
+        if (index + 1 == length || codeUnits.length > MAX_SIZE) {
+          result += stringFromCharCode.apply(null, codeUnits);
+          codeUnits.length = 0;
+        }
+      }
+      return result;
+    };
+    if (defineProperty) {
+      defineProperty(String, 'fromCodePoint', {
+        'value': fromCodePoint,
+        'configurable': true,
+        'writable': true
+      });
+    } else {
+      String.fromCodePoint = fromCodePoint;
+    }
+  }());
+}
diff --git a/contrib/views/hive-next/src/main/resources/ui/hive-web/yarn.lock b/contrib/views/hive-next/src/main/resources/ui/hive-web/yarn.lock
new file mode 100644
index 0000000..372fe54
--- /dev/null
+++ b/contrib/views/hive-next/src/main/resources/ui/hive-web/yarn.lock
@@ -0,0 +1,5066 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1, abbrev@^1.0.5:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.9:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^1.0.1:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-1.2.2.tgz#c8ce27de0acc76d896d2b1fad3df588d9e82f014"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.2:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0, ansi-regex@^1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.0.1, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz#a2d28c93102aa6cc96245a26cb954de06ec53f0c"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.0:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+ast-types@~0.6.1:
+  version "0.6.16"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.6.16.tgz#04205b72eddd195a8feaa081f11d0294a24ded93"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-foreach@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@^0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+async@^2.0.1:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.4.0.tgz#4990200f18ea5b837c2cc4f8c031a6985c385611"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+autoprefixer-core@^5.0.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/autoprefixer-core/-/autoprefixer-core-5.2.1.tgz#e640c414ae419aae21c1ad43c8ea0f3db82a566d"
+  dependencies:
+    browserslist "~0.4.0"
+    caniuse-db "^1.0.30000214"
+    num2fraction "^1.1.0"
+    postcss "~4.1.12"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.1.2.tgz#fdca871a99713aa00d19e3bbba41c44787a65398"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.9:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+body-parser@^1.12.2, body-parser@^1.2.0:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+body-parser@~1.8.0:
+  version "1.8.4"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.8.4.tgz#d497e04bc13b3f9a8bd8c70bb0cdc16f2e028898"
+  dependencies:
+    bytes "1.0.0"
+    depd "0.4.5"
+    iconv-lite "0.4.4"
+    media-typer "0.3.0"
+    on-finished "2.1.0"
+    qs "2.2.4"
+    raw-body "1.3.0"
+    type-is "~1.5.1"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.5.2.tgz#1f7d2e899e99b70c29a613e70d4c64590414b22e"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+"bower@>= 1.3.12", bower@^1.3.12:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.8.0.tgz#55dbebef0ad9155382d9e9d3e497c1372345b44a"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.0.0:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-autoprefixer@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-autoprefixer/-/broccoli-autoprefixer-3.0.0.tgz#b7c9edb7166382ab2c626261ff56566cda0d1a71"
+  dependencies:
+    autoprefixer-core "^5.0.0"
+    broccoli-filter "^0.1.6"
+    object-assign "^2.0.0"
+    postcss "^4.1.11"
+
+broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-0.5.3.tgz#4ba208a1f0facbd294466dd5a89d107971933f26"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    core-object "0.0.2"
+    promise-map-series "^0.2.0"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.14"
+    symlink-or-copy "^1.0.0"
+
+broccoli-caching-writer@0.5.5:
+  version "0.5.5"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-0.5.5.tgz#e8c6b5b3ca3ecd940784fb970476fb29805eb158"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    core-object "0.0.3"
+    debug "^2.1.1"
+    lodash-node "^2.4.1"
+    promise-map-series "^0.2.0"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.14"
+    symlink-or-copy "^1.0.0"
+
+broccoli-caching-writer@^2.0.4:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-clean-css@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-0.2.0.tgz#15f1c265a6986585a972bfb070bf52e9c054c861"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    clean-css "^2.2.1"
+
+broccoli-es3-safe-recast@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-es3-safe-recast/-/broccoli-es3-safe-recast-2.0.0.tgz#cb1d1807df03fd36dfbc35ad09cd6caa63b17f89"
+  dependencies:
+    broccoli-filter "~0.1.6"
+    es3-safe-recast "^2.0.0"
+
+broccoli-es6modules@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/broccoli-es6modules/-/broccoli-es6modules-0.5.1.tgz#ae9fa2a0e4be9f198d6ab94f6e733515ff2fa75d"
+  dependencies:
+    broccoli-caching-writer "0.5.3"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    esperanto "^0.6.8"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.16"
+    walk-sync "^0.1.3"
+
+broccoli-filter@0.1.12, broccoli-filter@^0.1.6, broccoli-filter@~0.1.6:
+  version "0.1.12"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.12.tgz#a91385fe697eb3eb71e7813ffd9204c3c183f58f"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    walk-sync "^0.1.3"
+
+broccoli-filter@^0.1.14:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel@0.2.2, broccoli-funnel@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-0.2.2.tgz#ea8e52fc134cced42c905a51fa14542e7b753df8"
+  dependencies:
+    core-object "0.0.2"
+    minimatch "^2.0.1"
+    mkdirp "^0.5.0"
+    rimraf "^2.2.8"
+    rsvp "^3.0.14"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.1.3"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@0.5.6:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-0.5.6.tgz#5b711dac92608a944afefa3a9bd25914ceb12fe7"
+  dependencies:
+    broccoli-filter "^0.1.14"
+    chalk "~0.4.0"
+    findup-sync "~0.1.3"
+    jshint "^2.7.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@0.2.6, broccoli-kitchen-sink-helpers@^0.2.0, broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6:
+  version "0.2.6"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.6.tgz#44a610fb600047d627a949a1f3b0a0427ed3610a"
+  dependencies:
+    glob "4.0.4"
+    mkdirp "^0.3.5"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-merge-trees@0.2.1, broccoli-merge-trees@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-0.2.1.tgz#8953bf9f886aeada0dec0ebaafb8251ef53ecf27"
+  dependencies:
+    broccoli-writer "^0.1.1"
+    promise-map-series "^0.2.0"
+    symlink-or-copy "^1.0.0"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.5:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.0.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-sass@^0.6.3:
+  version "0.6.8"
+  resolved "https://registry.yarnpkg.com/broccoli-sass/-/broccoli-sass-0.6.8.tgz#006f1e45c160b09b49d6266b84532c370eda0100"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+    mkdirp "^0.3.5"
+    node-sass "^3.3.3"
+
+broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-sourcemap-concat@^0.4.3:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/broccoli-sourcemap-concat/-/broccoli-sourcemap-concat-0.4.4.tgz#cab6e8c7ca0a8033fb28cb811dfaf43a17254466"
+  dependencies:
+    broccoli-caching-writer "0.5.3"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-writer "^0.1.1"
+    combined-stream "0.0.7"
+    fast-sourcemap-concat " ^0.2.4"
+    lodash-node "^2.4.1"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+
+broccoli-stew@^1.0.0:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-0.2.1.tgz#98df2c99f32e944be0949514655a8fe100248b7f"
+  dependencies:
+    broccoli-writer "^0.1.1"
+    lodash-node "^2.4.1"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.4.16"
+    walk-sync "^0.1.3"
+
+broccoli-unwatched-tree@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-unwatched-tree/-/broccoli-unwatched-tree-0.1.1.tgz#4312fde04bdafe67a05a967d72cc50b184a9f514"
+
+broccoli-writer@0.1.1, broccoli-writer@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+broccoli@0.15.3:
+  version "0.15.3"
+  resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-0.15.3.tgz#3692c7723e07682ba19c40d43a6062d794de84b4"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.0"
+    broccoli-slow-trees "^1.1.0"
+    commander "^2.0.0"
+    connect "^3.2.0"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.1.2"
+    handlebars "^2.0.0"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.6"
+    tiny-lr "^0.1.4"
+
+browserslist@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-0.4.0.tgz#3bd4ab9199dc1b9150d4d6dba4d9d3aabbc86dd4"
+  dependencies:
+    caniuse-db "^1.0.30000153"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-equal@~0.0.0:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/buffer-equal/-/buffer-equal-0.0.2.tgz#ecbb790f568d40098a6242b54805c75805eb938f"
+
+buffer-shims@^1.0.0, buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bunker@0.1.X:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/bunker/-/bunker-0.1.2.tgz#c88992464a8e2a6ede86930375f92b58077ef97c"
+  dependencies:
+    burrito ">=0.2.5 <0.3"
+
+"burrito@>=0.2.5 <0.3":
+  version "0.2.12"
+  resolved "https://registry.yarnpkg.com/burrito/-/burrito-0.2.12.tgz#d0d6e6ac81d5e99789c6fa4accb0b0031ea54f6b"
+  dependencies:
+    traverse "~0.5.1"
+    uglify-js "~1.1.1"
+
+bytes@1, bytes@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-1.0.0.tgz#3569ede8ba34315fab99c3e92cb04c7220de1fa8"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+camelcase@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+caniuse-db@^1.0.30000153, caniuse-db@^1.0.30000214:
+  version "1.0.30000664"
+  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000664.tgz#e16316e5fdabb9c7209b2bf0744ffc8a14201f22"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chalk@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.0.0.tgz#b3cf4ed0ff5397c99c75b8f679db2f52831f96dc"
+  dependencies:
+    ansi-styles "^2.0.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^1.0.3"
+    strip-ansi "^2.0.1"
+    supports-color "^1.3.0"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@0.1.x:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-0.1.2.tgz#06c21eed1a1b06aeb67553cdc53e23274bac2296"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-css@^2.2.1:
+  version "2.2.23"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.2.23.tgz#0590b5478b516c4903edc2d89bd3fdbdd286328c"
+  dependencies:
+    commander "2.2.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+cliui@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wrap-ansi "^2.0.0"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@^1.0.3:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.4:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@2.2.x:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.2.0.tgz#175ad4b9317f3ff615f201c1e57224f55a3e91df"
+
+commander@^2.0.0, commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@^1.4.7, concat-stream@^1.5.2:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+config-chain@~1.1.10:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.3.2.tgz#25e4c16c3768abf75c5a65bc61761f495055b459"
+  dependencies:
+    graceful-fs "^3.0.1"
+    js-yaml "^3.1.0"
+    mkdirp "^0.5.0"
+    object-assign "^2.0.0"
+    osenv "^0.1.0"
+    user-home "^1.0.0"
+    uuid "^2.0.1"
+    xdg-basedir "^1.0.0"
+
+connect@^3.2.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.11.0.tgz#838175806628569c360f6d7f618ae41fdfd73433"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-object@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.3.tgz#550821598b258a33a8c2263afa072b0df5295321"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-object@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-1.1.0.tgz#86d63918733cf9da1a5aae729e62c0a88e66ad0a"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cross-spawn@^0.2.6:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-0.2.9.tgz#bd67f96c07efb6303b7fe94c1e979f88478e0a39"
+  dependencies:
+    lru-cache "^2.5.0"
+
+cross-spawn@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982"
+  dependencies:
+    lru-cache "^4.0.1"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.0.0.tgz#89bd9df6732b51256bc6705342bba02ed12131ef"
+  dependencies:
+    ms "0.6.2"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+deep-equal@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5"
+
+deep-is@0.1.x:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@0.4.5:
+  version "0.4.5"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-0.4.5.tgz#1a664b53388b4a6573e8ae67b5f767c693ca97f1"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+did_it_work@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/did_it_work/-/did_it_work-0.0.6.tgz#5180cb9e16ebf9a8753a0cc6b4af9ccdff71ec05"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+difflet@~0.2.0:
+  version "0.2.6"
+  resolved "https://registry.yarnpkg.com/difflet/-/difflet-0.2.6.tgz#ab23b31f5649b6faa8e3d2acbd334467365ca6fa"
+  dependencies:
+    charm "0.1.x"
+    deep-is "0.1.x"
+    traverse "0.6.x"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.0.5.tgz#8c9b212898d8cd9f1a9436650ce7be202c9e9ff0"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-ajax@^2.0.1:
+  version "2.5.6"
+  resolved "https://registry.yarnpkg.com/ember-ajax/-/ember-ajax-2.5.6.tgz#a75f743ccf1b95e979a5cf96013b3dba8fa625e4"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-cli-autoprefixer@0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-autoprefixer/-/ember-cli-autoprefixer-0.4.1.tgz#cc1a8a8fd28ab3784cd6423144d9f119a3cfc12b"
+  dependencies:
+    broccoli-autoprefixer "^3.0.0"
+    lodash "^3.1.0"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-blanket@^0.5.0:
+  version "0.5.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-blanket/-/ember-cli-blanket-0.5.4.tgz#191ce3f7a4c29cc7a8e46eca06961e1627495444"
+  dependencies:
+    body-parser "^1.12.2"
+    broccoli-funnel "^0.2.2"
+    broccoli-merge-trees "^0.2.1"
+    core-object "^1.1.0"
+    fs-extra "^0.18.0"
+    lodash "^3.6.0"
+
+ember-cli-content-security-policy@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-content-security-policy/-/ember-cli-content-security-policy-0.3.0.tgz#c937f855db2f6fdd0e6daca4017d77e150c01f5d"
+
+ember-cli-copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-copy-dereference/-/ember-cli-copy-dereference-1.0.0.tgz#a1795bf6c70650317df4ab8674dd02e0bea5d4fd"
+
+ember-cli-htmlbars@0.7.4:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.4.tgz#89ebcd7a3370ee48d8d35f0ede92811936873d13"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
+ember-cli-htmlbars@^1.0.3:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.3.0:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-jquery-ui@0.0.12:
+  version "0.0.12"
+  resolved "https://registry.yarnpkg.com/ember-cli-jquery-ui/-/ember-cli-jquery-ui-0.0.12.tgz#e054cf4c273dcd97e0898b8b2baa0b9b68e40967"
+
+ember-cli-moment-shim@3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-3.0.1.tgz#c67c39f71c1c5a38bcc93bfa65b14e25f069895f"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.1.5"
+    broccoli-stew "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.0.0"
+    exists-sync "0.0.4"
+    lodash.defaults "^4.1.0"
+    moment "^2.13.0"
+    moment-timezone "^0.5.0"
+
+ember-cli-pretender@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-pretender/-/ember-cli-pretender-0.3.2.tgz#851c6a1dfbae37f2a36916b2228b1382493554eb"
+
+ember-cli-qunit@0.3.14:
+  version "0.3.14"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-0.3.14.tgz#23241dc141b029058d13907b6b041e91262302e7"
+  dependencies:
+    broccoli-jshint "0.5.6"
+
+ember-cli-selectize@0.0.19:
+  version "0.0.19"
+  resolved "https://registry.yarnpkg.com/ember-cli-selectize/-/ember-cli-selectize-0.0.19.tgz#16286cd4ec170cb3194d7384b262db79cc473b42"
+
+ember-cli-uglify@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.0.1.tgz#278a7ce0d19aa58a430d9804ebb54f61cfbd0998"
+  dependencies:
+    broccoli-uglify-sourcemap "^0.2.1"
+
+ember-cli-uploader@^0.3.9:
+  version "0.3.11"
+  resolved "https://registry.yarnpkg.com/ember-cli-uploader/-/ember-cli-uploader-0.3.11.tgz#d2c2dd4631013a29a1b30d9bed1c6050e999ad4d"
+
+ember-cli-version-checker@^1.0.2:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-0.2.2.tgz#4234ed4a8c6cc17e7432c652a704841787cd3944"
+  dependencies:
+    abbrev "^1.0.5"
+    bower "^1.3.12"
+    bower-config "0.5.2"
+    broccoli "0.15.3"
+    broccoli-caching-writer "0.5.5"
+    broccoli-clean-css "0.2.0"
+    broccoli-es3-safe-recast "^2.0.0"
+    broccoli-es6modules "^0.5.1"
+    broccoli-filter "0.1.12"
+    broccoli-funnel "0.2.2"
+    broccoli-kitchen-sink-helpers "0.2.6"
+    broccoli-merge-trees "0.2.1"
+    broccoli-sane-watcher "^1.0.1"
+    broccoli-sourcemap-concat "^0.4.3"
+    broccoli-unwatched-tree "0.1.1"
+    broccoli-writer "0.1.1"
+    chalk "1.0.0"
+    concat-stream "^1.4.7"
+    configstore "0.3.2"
+    core-object "0.0.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-copy-dereference "^1.0.0"
+    ember-router-generator "^0.3.2"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    fs-extra "0.16.5"
+    git-repo-info "^1.0.4"
+    glob "5.0.3"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    js-string-escape "^1.0.0"
+    leek "0.0.18"
+    lodash "^3.6.0"
+    markdown-it "4.0.3"
+    markdown-it-terminal "0.0.2"
+    minimatch "^2.0.4"
+    morgan "^1.5.2"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "^2.7.3"
+    pleasant-progress "^1.0.2"
+    promise-map-series "^0.2.1"
+    proxy-middleware "0.11.0"
+    quick-temp "0.1.2"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rimraf "2.3.2"
+    rsvp "^3.0.17"
+    sane "1.0.1"
+    semver "^4.3.1"
+    strip-ansi "^2.0.1"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.1"
+    testem "^0.7.6"
+    through "^2.3.6"
+    tiny-lr "0.1.5"
+    walk-sync "0.1.3"
+    yam "0.0.18"
+
+ember-computed-decorators@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/ember-computed-decorators/-/ember-computed-decorators-0.2.2.tgz#7c934a575c55ac3a18b6aaeb7cd2cbe149bc9b34"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-data@1.0.0-beta.16.1:
+  version "1.0.0-beta.16.1"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-1.0.0-beta.16.1.tgz#0b14ba20dd374f2125bd72b8d2090eaff6d541de"
+
+ember-dynamic-component@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/ember-dynamic-component/-/ember-dynamic-component-0.0.1.tgz#f9fa692d78668dc37a52117069fa281858c6a0bf"
+
+ember-export-application-global@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-font-awesome@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ember-font-awesome/-/ember-font-awesome-2.2.0.tgz#8b7b5e4b1b5ff2b865b09331b9bf5506fb0c3c1e"
+  dependencies:
+    chalk "^1.1.3"
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+    ember-computed-decorators "0.2.2"
+
+ember-macro-helpers@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/ember-macro-helpers/-/ember-macro-helpers-0.4.0.tgz#914670001478fcccccb84819aca7630cc44526c8"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+
+ember-moment@7.2.0:
+  version "7.2.0"
+  resolved "https://registry.yarnpkg.com/ember-moment/-/ember-moment-7.2.0.tgz#73202f5d62d7c645feb3abf580b3801c1beb6efd"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-macro-helpers "^0.4.0"
+
+ember-router-generator@^0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-0.3.3.tgz#1d25b2a78bea3146d700a0393ed1dac361fe1cfa"
+  dependencies:
+    recast "^0.9.16"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.3.tgz#1798ed93451246453d4c6f635d7a201fe940d5ab"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.2"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a"
+  dependencies:
+    after "0.8.2"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.7"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.3.tgz#8de7f97895d20d39b85f88eeee777b2bd42b13d4"
+  dependencies:
+    accepts "1.3.3"
+    base64id "1.0.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    ws "1.1.2"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es-simpler-traverser@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/es-simpler-traverser/-/es-simpler-traverser-0.0.1.tgz#f969b0588f0fa35374203aec4f3d87a4e6015421"
+
+es3-safe-recast@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/es3-safe-recast/-/es3-safe-recast-2.0.2.tgz#7bfdb3557bf03f28091348c34d6b54f69eaccf4b"
+  dependencies:
+    es-simpler-traverser "0.0.1"
+    recast "^0.9.18"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-promise@^2.0.0, es6-promise@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-2.3.0.tgz#96edb9f2fdb01995822b263dd8aadab6748181bc"
+
+es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esperanto@^0.6.8:
+  version "0.6.34"
+  resolved "https://registry.yarnpkg.com/esperanto/-/esperanto-0.6.34.tgz#fe0142fdf1e1423d0b70d22c6baacc865e80ded9"
+  dependencies:
+    acorn "^1.0.1"
+    chalk "^1.0.0"
+    magic-string "^0.4.9"
+    minimist "^1.1.0"
+    sander "^0.2.1"
+
+esprima-fb@~10001.1.0-dev-harmony-fb:
+  version "10001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-10001.1.0-dev-harmony-fb.tgz#f7efb452d3c8006dde6b3c59678604f7114a882c"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esutils@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3, express@^4.8.5:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+"fast-sourcemap-concat@ ^0.2.4":
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-0.2.7.tgz#b5d68a6d33e52f9d326fec38b836fa44d9b0d8fc"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.7.2:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.7.3.tgz#cc4074c7f4a4dfd03af54dd65c354b135132ce11"
+  dependencies:
+    websocket-driver ">=0.3.6"
+
+fb-watchman@0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-0.0.0.tgz#0a6775818dc611e306083382591c89c16f712d24"
+  dependencies:
+    json-stream "0.2.2"
+    nextback "~0.1.0"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fileset@^0.1.5:
+  version "0.1.8"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.1.8.tgz#506b91a9396eaa7e32fb42a84077c7a0c736b741"
+  dependencies:
+    glob "3.x"
+    minimatch "0.x"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+findup-sync@^0.1.2, findup-sync@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.1.3.tgz#7f3e7a97b82392c653bf06589bd85190e93c3683"
+  dependencies:
+    glob "~3.2.9"
+    lodash "~2.4.1"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.6.6:
+  version "0.6.6"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.6.6.tgz#6023218e215c8ae628ac5105a60e470a50983f6f"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash "~2.3.0"
+    minimatch "~0.2.9"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.16.5, fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.18.0:
+  version "0.18.4"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.18.4.tgz#7f205752d6d3959c967533e34540161a7b38dc36"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.9:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@~1.0.8:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.1.1.tgz#6b9175db6239a83d8209e232426c494dbb29690c"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.10:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.0, gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+gaze@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/gaze/-/gaze-1.1.2.tgz#847224677adb8870d679257ed3388fdb61e40105"
+  dependencies:
+    globule "^1.0.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob@3.x, glob@~3.2.9:
+  version "3.2.11"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
+  dependencies:
+    inherits "2"
+    minimatch "0.3"
+
+glob@4.0.4:
+  version "4.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.0.4.tgz#730ce0190d87eca7812398018e21be712b4d69d2"
+  dependencies:
+    inherits "2"
+    minimatch "^0.3.0"
+    once "^1.3.0"
+  optionalDependencies:
+    graceful-fs "^3.0.2"
+
+glob@5.0.3:
+  version "5.0.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.3.tgz#15528c1c727e474a8e7731541c00b00ec802952d"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@^4.3.5, glob@^4.4.2:
+  version "4.5.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@^5.0.10, glob@^5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.0, glob@^7.0.3, glob@^7.0.5, glob@^7.1.1, glob@~7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@~7.0.6:
+  version "7.0.6"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.6.tgz#211bafaf49e525b8cd93260d14ab136152b3f57a"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globule@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/globule/-/globule-1.1.0.tgz#c49352e4dc183d85893ee825385eb994bb6df45f"
+  dependencies:
+    glob "~7.1.1"
+    lodash "~4.16.4"
+    minimatch "~3.0.2"
+
+graceful-fs@^3.0.1, graceful-fs@^3.0.2, graceful-fs@^3.0.4, graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@~4.1.6:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@^1.8.1:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f"
+
+handlebars@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-2.0.0.tgz#6e9d7f8514a3467fa5e9f82cc158ecfc1d5ac76f"
+  dependencies:
+    optimist "~0.3"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-1.0.3.tgz#c0b5b1615d9e382b0ff67169d967b425e48ca538"
+  dependencies:
+    ansi-regex "^1.1.0"
+    get-stdin "^4.0.1"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@~2.1.5:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.8.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.15, iconv-lite@^0.4.5:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb"
+
+iconv-lite@0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.4.tgz#e95f2e41db0735fc21652f7827a5ee32e63c83a8"
+
+iferr@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+in-publish@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@*, inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1, inherits@~2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@^1.3.4, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.4:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inquirer@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.5.1.tgz#e9f2cd1ee172c7a32e054b78a03d4ddb0d7707f1"
+  dependencies:
+    async "~0.8.0"
+    chalk "~0.4.0"
+    cli-color "~0.3.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-base64@^2.1.8, js-base64@~2.1.8:
+  version "2.1.9"
+  resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce"
+
+js-string-escape@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-string-escape/-/js-string-escape-1.0.1.tgz#e2625badbc0d67c7533e9edc1068c587ae4137ef"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-yaml@^3.1.0, js-yaml@^3.2.5:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stream@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/json-stream/-/json-stream-0.2.2.tgz#096e13af872e604d915de3c5dcb87673611c38f4"
+
+json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+jsonfile@^2.0.0, jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.18.tgz#12b4a0c70aa500e9099b41c73f73b7d9b23c93d1"
+  dependencies:
+    debug "^2.1.0"
+    lodash-node "^2.4.1"
+    request "^2.27.0"
+    rsvp "^3.0.6"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.0.1.tgz#73b32a4854d52438f59c6e09b6ded5be0abdd94a"
+  dependencies:
+    uc.micro "^1.0.0"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+load-json-file@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    parse-json "^2.2.0"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+    strip-bom "^2.0.0"
+
+lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash-node@^2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-2.4.1.tgz#ea82f7b100c733d1a42af76801e506105e2a80ec"
+
+lodash-node@^3.4.0:
+  version "3.10.2"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-3.10.2.tgz#2598d5b1b54e6a68b4cb544e5c730953cbf632f7"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^4.2.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7"
+
+lodash.clonedeep@^4.3.2:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.defaults@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-4.2.0.tgz#d09178716ffea4dde9e5fb7b37f6f0802274580c"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.1.0, lodash@^3.10.0, lodash@^3.6.0, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.0, lodash@^4.14.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.3.0.tgz#dfbdac99cf87a59a022c474730570d8716c267dd"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+lodash@~4.16.4:
+  version "4.16.6"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.16.6.tgz#d22c9ac660288f3843e16ba7d2b5d06cca27d777"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loud-rejection@^1.0.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f"
+  dependencies:
+    currently-unhandled "^0.4.1"
+    signal-exit "^3.0.0"
+
+lru-cache@2, lru-cache@^2.5.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1, lru-cache@~4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+lru-queue@0.1:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lru-queue/-/lru-queue-0.1.0.tgz#2738bd9f0d3cf4f84490c5736c48699ac632cda3"
+  dependencies:
+    es5-ext "~0.10.2"
+
+magic-string@^0.4.9:
+  version "0.4.9"
+  resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.4.9.tgz#f46e9aaf5959cfbbc88f68a9969cd66d120e1a22"
+  dependencies:
+    vlq "^0.2.0"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+map-obj@^1.0.0, map-obj@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
+
+markdown-it-terminal@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.2.tgz#fff2c4a5df237800639b468a5fdd78e21917c4f2"
+  dependencies:
+    ansi-styles "^2.0.1"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash-node "^3.4.0"
+    markdown-it "^4.0.0"
+
+markdown-it@4.0.3, markdown-it@^4.0.0:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.0.3.tgz#2a852d188a09eb9c5777b413c3c34677fa638b18"
+  dependencies:
+    argparse "~1.0.0"
+    entities "~1.1.1"
+    linkify-it "~1.0.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memoizee@~0.3.8:
+  version "0.3.10"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.3.10.tgz#4eca0d8aed39ec9d017f4c5c2f2f6432f42e5c8f"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.11"
+    es6-weak-map "~0.1.4"
+    event-emitter "~0.3.4"
+    lru-queue "0.1"
+    next-tick "~0.2.2"
+    timers-ext "0.1"
+
+meow@^3.7.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb"
+  dependencies:
+    camelcase-keys "^2.0.0"
+    decamelize "^1.1.2"
+    loud-rejection "^1.0.0"
+    map-obj "^1.0.1"
+    minimist "^1.1.3"
+    normalize-package-data "^2.3.4"
+    object-assign "^4.0.1"
+    read-pkg-up "^1.0.1"
+    redent "^1.0.0"
+    trim-newlines "^1.0.0"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+mime-db@~1.12.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.12.0.tgz#3d0c63180f458eb10d325aaa37d7c58ae312e9d7"
+
+mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime-types@~2.0.9:
+  version "2.0.14"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.0.14.tgz#310e159db23e077f8bb22b748dabfa4957140aa6"
+  dependencies:
+    mime-db "~1.12.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@0.3, minimatch@0.x, minimatch@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.3.0.tgz#275d8edaac4f1bb3326472089e7949c8394699dd"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.2, minimatch@~3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3, minimatch@^2.0.4:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@~0.2.14, minimatch@~0.2.9:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1, minimist@^1.1.3:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+"mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+mktemp@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.4.0.tgz#6d0515611c8a8c84e484aa2000129b98e981ff0b"
+
+moment-timezone@^0.5.0:
+  version "0.5.13"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.13.tgz#99ce5c7d827262eb0f1f702044177f60745d7b90"
+  dependencies:
+    moment ">= 2.9.0"
+
+"moment@>= 2.9.0", moment@^2.13.0:
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@~0.9.0:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.9.1.tgz#84f0f3fd6acc7317f63de2affdcc0cee009b0477"
+
+ms@0.6.2:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.6.2.tgz#d89c2124c6fdc1353d65a8b77bf1aac4b193708c"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-1.2.0.tgz#6cdec1cb03798792a948f8cc53ad69da0d0dd5c8"
+
+mute-stream@0.0.4, mute-stream@~0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+nan@^2.3.2:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+next-tick@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c"
+
+next-tick@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.2.2.tgz#75da4a927ee5887e39065880065b7336413b310d"
+
+nextback@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/nextback/-/nextback-0.1.0.tgz#8713831b62ebed7039daa3f3d39bd3af5eb1b476"
+
+node-gyp@^3.3.1, node-gyp@~3.6.0:
+  version "3.6.0"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.6.0.tgz#7474f63a3a0501161dda0b6341f022f14c423fa6"
+  dependencies:
+    fstream "^1.0.0"
+    glob "^7.0.3"
+    graceful-fs "^4.1.2"
+    minimatch "^3.0.2"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2 || 3 || 4"
+    osenv "0"
+    request "2"
+    rimraf "2"
+    semver "~5.3.0"
+    tar "^2.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-sass@^3.3.3:
+  version "3.13.1"
+  resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-3.13.1.tgz#7240fbbff2396304b4223527ed3020589c004fc2"
+  dependencies:
+    async-foreach "^0.1.3"
+    chalk "^1.1.1"
+    cross-spawn "^3.0.0"
+    gaze "^1.0.0"
+    get-stdin "^4.0.1"
+    glob "^7.0.3"
+    in-publish "^2.0.0"
+    lodash.assign "^4.2.0"
+    lodash.clonedeep "^4.3.2"
+    meow "^3.7.0"
+    mkdirp "^0.5.1"
+    nan "^2.3.2"
+    node-gyp "^3.3.1"
+    npmlog "^4.0.0"
+    request "^2.61.0"
+    sass-graph "^2.1.1"
+
+node-uuid@^1.4.3, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@~3.0.6:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, normalize-package-data@^2.3.2, normalize-package-data@^2.3.4, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-install-checks@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@^4.1.1, npm-package-arg@~4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.1.1.tgz#86d9dca985b4c5e5d59772dfd5de6919998a495a"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-registry-client@~7.2.1:
+  version "7.2.1"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.2.1.tgz#c792266b088cc313f8525e7e35248626c723db75"
+  dependencies:
+    concat-stream "^1.5.2"
+    graceful-fs "^4.1.6"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.3"
+    request "^2.74.0"
+    retry "^0.10.0"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0 || ~3.1.0"
+
+npm-user-validate@~0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@^2.7.3:
+  version "2.15.12"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.15.12.tgz#df7c3ed5a277c3f9d4b5d819b05311d10a200ae6"
+  dependencies:
+    abbrev "~1.0.9"
+    ansi "~0.3.1"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.9"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.2"
+    columnify "~1.5.4"
+    config-chain "~1.1.10"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.9"
+    fs-write-stream-atomic "~1.0.8"
+    fstream "~1.0.10"
+    fstream-npm "~1.1.1"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~7.0.6"
+    graceful-fs "~4.1.6"
+    hosted-git-info "~2.1.5"
+    inflight "~1.0.4"
+    inherits "~2.0.3"
+    ini "~1.3.4"
+    init-package-json "~1.9.4"
+    lockfile "~1.0.1"
+    lru-cache "~4.0.1"
+    minimatch "~3.0.3"
+    mkdirp "~0.5.1"
+    node-gyp "~3.6.0"
+    nopt "~3.0.6"
+    normalize-git-url "~3.0.2"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.7"
+    npm-package-arg "~4.1.0"
+    npm-registry-client "~7.2.1"
+    npm-user-validate "~0.1.5"
+    npmlog "~2.0.4"
+    once "~1.4.0"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.4"
+    readable-stream "~2.1.5"
+    realize-package-specifier "~3.0.1"
+    request "~2.74.0"
+    retry "~0.10.0"
+    rimraf "~2.5.4"
+    semver "~5.1.0"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~2.0.0"
+    spdx-license-ids "~1.2.2"
+    strip-ansi "~3.0.1"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.11"
+    wrappy "~1.0.2"
+    write-file-atomic "~1.1.4"
+
+"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+"npmlog@0.1 || 1 || 2", "npmlog@~2.0.0 || ~3.1.0", npmlog@~2.0.4:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+npmlog@^1.0.0:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-1.2.1.tgz#28e7be619609b53f7ad1dd300a10d64d716268b6"
+  dependencies:
+    ansi "~0.3.0"
+    are-we-there-yet "~1.0.0"
+    gauge "~1.2.0"
+
+num2fraction@^1.1.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.1.0.tgz#0c539f09291e8ffadde0c8a25850fb2cedc7022d"
+  dependencies:
+    ee-first "1.0.5"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0, once@^1.3.3, once@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+optimist@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+osenv@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.0.3.tgz#cd6ad8ddb290915ad9e22765576025d411f29cb6"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-exists@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
+  dependencies:
+    pinkie-promise "^2.0.0"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+path-type@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
+  dependencies:
+    graceful-fs "^4.1.2"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+pify@^2.0.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+pleasant-progress@^1.0.2:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/pleasant-progress/-/pleasant-progress-1.1.0.tgz#c99cd730a2e50cffdd3badff845fc4d5282e266b"
+
+postcss@^4.1.11, postcss@~4.1.12:
+  version "4.1.16"
+  resolved "https://registry.yarnpkg.com/postcss/-/postcss-4.1.16.tgz#4c449b4c8af9df3caf6d37f8e1e575d0361758dc"
+  dependencies:
+    es6-promise "~2.3.0"
+    js-base64 "~2.1.8"
+    source-map "~0.4.2"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+promise-map-series@^0.2.0, promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+proxy-middleware@0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/proxy-middleware/-/proxy-middleware-0.11.0.tgz#0650be600da0336a879ede8cae70062a24e8caaa"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+qs@2.2.4:
+  version "2.2.4"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.2.4.tgz#2e9fbcd34b540e3421c924ecd01e90aa975319c8"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~2.2.3:
+  version "2.2.5"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.2.5.tgz#1088abaf9dcc0ae5ae45b709e6c6b5888b23923c"
+
+qs@~6.2.0:
+  version "6.2.3"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.2.3.tgz#1cfcb25c10a9b2b483053ff39f5dfc9233908cfe"
+
+quick-temp@0.1.2, quick-temp@^0.1.0, quick-temp@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.2.tgz#abd2bf0c1bf5be923b97f42ff875dbf0dfaa6349"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+quick-temp@^0.1.3:
+  version "0.1.8"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.8.tgz#bab02a242ab8fb0dd758a3c9776b32f9a5d94408"
+  dependencies:
+    mktemp "~0.4.0"
+    rimraf "^2.5.4"
+    underscore.string "~3.3.4"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-1.3.0.tgz#978230a156a5548f42eef14de22d0f4f610083d1"
+  dependencies:
+    bytes "1"
+    iconv-lite "0.4.4"
+
+raw-body@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.2.0.tgz#994976cf6a5096a41162840492f0bdc5d6e7fb96"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.15"
+    unpipe "1.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.4:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read-pkg-up@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02"
+  dependencies:
+    find-up "^1.0.0"
+    read-pkg "^1.0.0"
+
+read-pkg@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28"
+  dependencies:
+    load-json-file "^1.0.0"
+    normalize-package-data "^2.3.2"
+    path-type "^1.0.0"
+
+read@1, read@~1.0.1, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+"readable-stream@1 || 2", "readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@1.1:
+  version "1.1.13"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.13.tgz#f6eef764f514c89e2b9e23146a75ba106756d23e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@~2.1.5:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.1.5.tgz#66fa8b720e1438b364681f2ad1a63c618448c9d0"
+  dependencies:
+    buffer-shims "^1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@0.1.1, readline2@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+realize-package-specifier@~3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.9.16, recast@^0.9.18:
+  version "0.9.18"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.9.18.tgz#f70921bb9f737d8e1fb06a440315bd7ec14587c9"
+  dependencies:
+    ast-types "~0.6.1"
+    esprima-fb "~10001.1.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.1.40"
+
+redent@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde"
+  dependencies:
+    indent-string "^2.1.0"
+    strip-indent "^1.0.1"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request@2, request@^2.27.0, request@^2.61.0, request@^2.74.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@~2.74.0:
+  version "2.74.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.74.0.tgz#7693ca768bbb0ea5c8ce08c084a45efa05b892ab"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    bl "~1.1.2"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc4"
+    har-validator "~2.0.6"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.1"
+    qs "~6.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "~0.4.1"
+
+require-directory@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
+
+require-main-filename@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+retry@^0.10.0, retry@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.10.1.tgz#e76388d217992c252750241d3d3956fed98d8ff4"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.5.2, rimraf@~2.5.4:
+  version "2.5.4"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@2.3.2, rimraf@^2.2.8:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.3.2.tgz#7304bd9275c401b89103b106b3531c1ef0c02fe9"
+  dependencies:
+    glob "^4.4.2"
+
+rimraf@^2.5.3, rimraf@^2.5.4, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+runforcover@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/runforcover/-/runforcover-0.0.2.tgz#344f057d8d45d33aebc6cc82204678f69c4857cc"
+  dependencies:
+    bunker "0.1.X"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sander@^0.2.1:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/sander/-/sander-0.2.4.tgz#1dc6ba6acf7969a4304ef7f1349081a57fe3502f"
+  dependencies:
+    es6-promise "^2.0.0"
+    graceful-fs "^3.0.4"
+    mkdirp "^0.5.0"
+    rimraf "^2.2.8"
+
+sane@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.0.1.tgz#a617a674445c5ca9abdcc235b563ec5e21e4f661"
+  dependencies:
+    fb-watchman "0.0.0"
+    minimatch "~0.2.14"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+sass-graph@^2.1.1:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.2.tgz#f4d6c95b546ea2a09d14176d0fc1a07ee2b48354"
+  dependencies:
+    glob "^7.0.0"
+    lodash "^4.0.0"
+    scss-tokenizer "^0.2.1"
+    yargs "^6.6.0"
+
+scss-tokenizer@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/scss-tokenizer/-/scss-tokenizer-0.2.1.tgz#07c0cc577bb7ab4d08fd900185adbf4bc844141d"
+  dependencies:
+    js-base64 "^2.1.8"
+    source-map "^0.4.2"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.3.0, semver@~5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.1.0:
+  version "5.1.1"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.1.1.tgz#a3292a373e6f3e0798da0b20641b9a9c5bc47e19"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@^2.0.0, set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@*, slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.3.tgz#b30e86aa10d5ef3546601c09cde4765e381da377"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.3"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@^1.3.4:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.3.tgz#b8af9caba00949e568e369f1327ea9be9ea2461b"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.3"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.7.3"
+    socket.io-parser "2.3.1"
+
+sorted-object@~2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-2.0.1.tgz#7d631f4bd3a798a24af1dffcfbfe83337a5df5fc"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.4.2, source-map@~0.4.2:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.1.40, source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.2, spdx-license-ids@~1.2.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+sprintf-js@^1.0.3, sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+"statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1, string-width@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1, strip-ansi@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-indent@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2"
+  dependencies:
+    get-stdin "^4.0.1"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^1.3.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-1.3.1.tgz#15758df09d8ff3b4acc307539fabe27095e1042d"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+tap@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tap/-/tap-0.6.0.tgz#75557d8285b87aef61902b68fb782fa4dc65edd4"
+  dependencies:
+    buffer-equal "~0.0.0"
+    deep-equal "^1.0.0"
+    difflet "~0.2.0"
+    glob "^4.3.5"
+    inherits "*"
+    mkdirp "^0.5.0"
+    nopt "^3.0.1"
+    runforcover "~0.0.2"
+    slide "*"
+    yamlish "*"
+
+tar@^2.0.0, tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.1.tgz#4b7b4ffde85bb09f2dd6ba6cc43b44213c94fd3a"
+  dependencies:
+    rimraf "~2.2.6"
+
+testem@^0.7.6:
+  version "0.7.7"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-0.7.7.tgz#b548d80e5b48b64c32a8eeaa0cdda6a7853e915a"
+  dependencies:
+    async "^0.9.0"
+    backbone "^1.1.2"
+    charm "^1.0.0"
+    colors "^1.0.3"
+    commander "^2.6.0"
+    consolidate "^0.11.0"
+    cross-spawn "^0.2.6"
+    did_it_work "0.0.6"
+    express "^4.10.7"
+    fileset "^0.1.5"
+    fireworm "^0.6.6"
+    glob "^4.3.5"
+    growl "^1.8.1"
+    http-proxy "^1.8.1"
+    js-yaml "^3.2.5"
+    mkdirp "^0.5.0"
+    mustache "^1.0.0"
+    npmlog "^1.0.0"
+    rimraf "^2.2.8"
+    socket.io "^1.3.4"
+    styled_string "0.0.1"
+    tap "^0.6.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+through@^2.3.6, through@~2.3.4, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+timers-ext@0.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.2.tgz#61cc47a76c1abd3195f14527f978d58ae94c5204"
+  dependencies:
+    es5-ext "~0.10.14"
+    next-tick "1"
+
+tiny-lr@0.1.5, tiny-lr@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.1.5.tgz#2a0c8f482e87dcd19c0bb2714905510a6a6d0024"
+  dependencies:
+    body-parser "~1.8.0"
+    debug "~2.0.0"
+    faye-websocket "~0.7.2"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~2.2.3"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+traverse@0.6.x:
+  version "0.6.6"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.6.6.tgz#cbdf560fd7b9af632502fed40f918c157ea97137"
+
+traverse@~0.5.1:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.5.2.tgz#e203c58d5f7f0e37db6e74c0acb929bb09b61d85"
+
+trim-newlines@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613"
+
+trim-right@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.5.1:
+  version "1.5.7"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.5.7.tgz#b9368a593cc6ef7d0645e78b2f4c64cbecd05e90"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.0.9"
+
+type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.4.16:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-js@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-1.1.1.tgz#ee71a97c4cefd06a1a9b20437f34118982aa035b"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore.string@~3.3.4:
+  version "3.3.4"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-3.3.4.tgz#2c2a3f9f83e64762fdc45e6ceac65142864213db"
+  dependencies:
+    sprintf-js "^1.0.3"
+    util-deprecate "^1.0.2"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+user-home@^1.0.0, user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@^1.0.2, util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+vlq@^0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/vlq/-/vlq-0.2.2.tgz#e316d5257b40b86bb43cb8d5fea5d7f54d6b0ca1"
+
+walk-sync@0.1.3, walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.5:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.3.6:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f"
+
+which@1, which@^1.2.9, which@~1.2.11:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrap-ansi@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+
+wrappy@1, wrappy@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@~1.1.4:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.2.tgz#8a244fa052401e08c9886cf44a85189e1fd4067f"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-1.0.1.tgz#14ff8f63a4fdbcb05d5b6eea22b36f3033b9f04e"
+  dependencies:
+    user-home "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0, y18n@^3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.18.tgz#e5cab771f0fc80ca599814cb9c269cb8bff00e2c"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.16.3"
+    lodash.merge "^3.0.2"
+
+yamlish@*:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/yamlish/-/yamlish-0.0.7.tgz#b4af9a1dcc63618873c3d6e451ec3213c39a57fb"
+
+yargs-parser@^4.2.0:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-4.2.1.tgz#29cceac0dc4f03c6c87b4a9f217dd18c9f74871c"
+  dependencies:
+    camelcase "^3.0.0"
+
+yargs@^6.6.0:
+  version "6.6.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-6.6.0.tgz#782ec21ef403345f830a808ca3d513af56065208"
+  dependencies:
+    camelcase "^3.0.0"
+    cliui "^3.2.0"
+    decamelize "^1.1.1"
+    get-caller-file "^1.0.1"
+    os-locale "^1.4.0"
+    read-pkg-up "^1.0.1"
+    require-directory "^2.1.1"
+    require-main-filename "^1.0.1"
+    set-blocking "^2.0.0"
+    string-width "^1.0.2"
+    which-module "^1.0.0"
+    y18n "^3.2.1"
+    yargs-parser "^4.2.0"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/hive20/pom.xml b/contrib/views/hive20/pom.xml
index a583d23..7ea95ad 100644
--- a/contrib/views/hive20/pom.xml
+++ b/contrib/views/hive20/pom.xml
@@ -278,29 +278,34 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>src/main/resources/ui/</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for 
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install --pure-lockfile</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
-              <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm</arguments>
+              <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm --ignore-engines</arguments>
             </configuration>
           </execution>
         </executions>
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/HiveActor.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/HiveActor.java
index 384b798..b955650 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/HiveActor.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/HiveActor.java
@@ -25,26 +25,22 @@
 
 public abstract class HiveActor extends UntypedActor {
 
-    private final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final Logger LOG = LoggerFactory.getLogger(HiveActor.class);
 
-    @Override
-    final public void onReceive(Object message) throws Exception {
-        HiveMessage hiveMessage = new HiveMessage(message);
-        if(LOG.isDebugEnabled()){
-            LOG.debug("Received message: " + message.getClass().getName() + ", generated id: " + hiveMessage.getId() +
-                    " sent by: " + sender() + ", recieved by" + self());
-        }
-
-        handleMessage(hiveMessage);
-
-        if(LOG.isDebugEnabled()){
-            LOG.debug("Message submitted: " + hiveMessage.getId());
-
-        }
+  @Override
+  final public void onReceive(Object message) throws Exception {
+    HiveMessage hiveMessage = new HiveMessage(message);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received message: " + message.getClass().getName() + ", generated id: " + hiveMessage.getId() +
+          " sent by: " + sender() + ", recieved by" + self());
     }
 
-    public abstract void handleMessage(HiveMessage hiveMessage);
+    handleMessage(hiveMessage);
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Message submitted: " + hiveMessage.getId());
+    }
+  }
 
-
+  public abstract void handleMessage(HiveMessage hiveMessage);
 }
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/JdbcConnector.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/JdbcConnector.java
index f93ecbf..5a744e2 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/JdbcConnector.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/JdbcConnector.java
@@ -578,12 +578,6 @@
   }
 
   private void checkTerminationInactivity() {
-    if (!isAsync()) {
-      // Should not terminate if job is sync. Will terminate after the job is finished.
-      stopTerminateInactivityScheduler();
-      return;
-    }
-
     LOG.debug("Termination check, executing status: {}", executing);
     if (executing) {
       keepAlive();
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/OperationController.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/OperationController.java
index f751d8f..3b1397c 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/OperationController.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/OperationController.java
@@ -63,7 +63,7 @@
  */
 public class OperationController extends HiveActor {
 
-  private final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final Logger LOG = LoggerFactory.getLogger(OperationController.class);
 
   private final ActorSystem system;
   private final ActorRef deathWatch;
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
index 5d0f307..6af9359 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
@@ -34,11 +34,11 @@
 
   private DatabaseMetadataWrapper databaseMetadata;
   private Boolean isTableStatsEnabled;
-  private Integer numFiles;
-  private Integer numRows;
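+  // Long rather than Integer: file counts and byte sizes on large tables can
+  // exceed Integer.MAX_VALUE.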
+  private Long numFiles;
+  private Long numRows;
   private String columnStatsAccurate;
-  private Integer rawDataSize;
-  private Integer totalSize;
+  private Long rawDataSize;
+  private Long totalSize;
 
   public Boolean getTableStatsEnabled() {
     return isTableStatsEnabled;
@@ -48,11 +48,11 @@
     isTableStatsEnabled = tableStatsEnabled;
   }
 
-  public Integer getNumFiles() {
+  public Long getNumFiles() {
     return numFiles;
   }
 
-  public void setNumFiles(Integer numFiles) {
+  public void setNumFiles(Long numFiles) {
     this.numFiles = numFiles;
   }
 
@@ -64,27 +64,27 @@
     this.columnStatsAccurate = columnStatsAccurate;
   }
 
-  public Integer getRawDataSize() {
+  public Long getRawDataSize() {
     return rawDataSize;
   }
 
-  public void setRawDataSize(Integer rawDataSize) {
+  public void setRawDataSize(Long rawDataSize) {
     this.rawDataSize = rawDataSize;
   }
 
-  public Integer getTotalSize() {
+  public Long getTotalSize() {
     return totalSize;
   }
 
-  public void setTotalSize(Integer totalSize) {
+  public void setTotalSize(Long totalSize) {
     this.totalSize = totalSize;
   }
 
-  public Integer getNumRows() {
+  public Long getNumRows() {
     return numRows;
   }
 
-  public void setNumRows(Integer numRows) {
+  public void setNumRows(Long numRows) {
     this.numRows = numRows;
   }
 
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
index 711cab9..ef1b464 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
@@ -102,22 +102,22 @@
 
     if(!Strings.isNullOrEmpty(numFiles) && !Strings.isNullOrEmpty(numFiles.trim())){
       tableStats.setTableStatsEnabled(true);
-      tableStats.setNumFiles(Integer.valueOf(numFiles.trim()));
+      tableStats.setNumFiles(Long.valueOf(numFiles.trim()));
     }
 
     if(!Strings.isNullOrEmpty(numRows) && !Strings.isNullOrEmpty(numRows.trim())){
       tableStats.setTableStatsEnabled(true);
-      tableStats.setNumRows(Integer.valueOf(numRows.trim()));
+      tableStats.setNumRows(Long.valueOf(numRows.trim()));
     }
 
     if(!Strings.isNullOrEmpty(rawDataSize) && !Strings.isNullOrEmpty(rawDataSize.trim())){
       tableStats.setTableStatsEnabled(true);
-      tableStats.setRawDataSize(Integer.valueOf(rawDataSize.trim()));
+      tableStats.setRawDataSize(Long.valueOf(rawDataSize.trim()));
     }
 
     if(!Strings.isNullOrEmpty(totalSize) && !Strings.isNullOrEmpty(totalSize.trim())){
       tableStats.setTableStatsEnabled(true);
-      tableStats.setTotalSize(Integer.valueOf(totalSize.trim()));
+      tableStats.setTotalSize(Long.valueOf(totalSize.trim()));
     }
 
     if(!Strings.isNullOrEmpty(columnStatsAccurate) && !Strings.isNullOrEmpty(columnStatsAccurate.trim())) {
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/InsertFromQueryGenerator.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/InsertFromQueryGenerator.java
index 8e22fc7..e761425 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/InsertFromQueryGenerator.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/InsertFromQueryGenerator.java
@@ -18,7 +18,10 @@
 
 package org.apache.ambari.view.hive20.internal.query.generators;
 
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
 import com.google.common.base.Optional;
+import com.google.common.collect.FluentIterable;
 import org.apache.ambari.view.hive20.client.ColumnDescription;
 import org.apache.ambari.view.hive20.exceptions.ServiceException;
 import org.apache.ambari.view.hive20.internal.dto.ColumnInfo;
@@ -26,6 +29,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.annotation.Nullable;
+import java.util.LinkedList;
+import java.util.List;
+
 public class InsertFromQueryGenerator implements QueryGenerator{
   protected final static Logger LOG =
       LoggerFactory.getLogger(InsertFromQueryGenerator.class);
@@ -38,12 +45,34 @@
 
   @Override
   public Optional<String> getQuery() throws ServiceException {
-    StringBuilder insertQuery = new StringBuilder("INSERT INTO TABLE `").append(insertFromQueryInput.getToDatabase()).append('`').append(".")
-        .append("`").append(insertFromQueryInput.getToTable()).append("`")
-        .append(" SELECT ");
+    StringBuilder insertQuery = new StringBuilder();
+    // Dynamic partition strict mode requires at least one static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict
+    insertQuery.append("set hive.exec.dynamic.partition.mode=nonstrict;").append("\n");
 
+    insertQuery.append(" FROM ").append("`").append(insertFromQueryInput.getFromDatabase()).append("`.`")
+        .append(insertFromQueryInput.getFromTable()).append("` tempTable");
+
+    insertQuery.append(" INSERT INTO TABLE `").append(insertFromQueryInput.getToDatabase()).append('`').append(".")
+        .append("`").append(insertFromQueryInput.getToTable()).append("`");
+    // PARTITION (partcol1[=val1], partcol2[=val2] ...)
+    if (insertFromQueryInput.getPartitionedColumns() != null && insertFromQueryInput.getPartitionedColumns().size() > 0) {
+      insertQuery.append(" PARTITION ").append("(");
+      insertQuery.append(Joiner.on(",").join(FluentIterable.from(insertFromQueryInput.getPartitionedColumns()).transform(new Function<ColumnInfo, String>() {
+        @Override
+        public String apply(ColumnInfo columnInfo) {
+          return "`" + columnInfo.getName() + "`";
+        }
+      })));
+      insertQuery.append(" ) ");
+    }
+
+    insertQuery.append(" SELECT ");
+
+    List<ColumnInfo> allColumns = new LinkedList<>(insertFromQueryInput.getNormalColumns());
+    // Order matters: normal columns must come first, followed by the partitioned columns.
+    allColumns.addAll(insertFromQueryInput.getPartitionedColumns());
     boolean first = true;
-    for(ColumnInfo column : insertFromQueryInput.getHeader()){
+    for(ColumnInfo column : allColumns){
       String type = column.getType();
       boolean unhex = insertFromQueryInput.getUnhexInsert() && (
           ColumnDescription.DataTypes.STRING.toString().equals(type)
@@ -59,6 +88,7 @@
         insertQuery.append("UNHEX(");
       }
 
+      insertQuery.append("tempTable.");
       insertQuery.append('`').append(column.getName()).append('`');
 
       if(unhex) {
@@ -68,8 +98,7 @@
       first = false;
     }
 
-    insertQuery.append(" FROM ").append("`").append(insertFromQueryInput.getFromDatabase()).append(".")
-        .append(insertFromQueryInput.getFromTable()).append("` ").append(";");
+    insertQuery.append(";");
     String query = insertQuery.toString();
     LOG.info("Insert From Query : {}", query);
     return Optional.of(query);
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
index f2e4ee9..d99938f 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
@@ -181,11 +181,11 @@
    * Get job results in csv format
    */
   @GET
-  @Path("{jobId}/results/csv")
+  @Path("{jobId}/results/csv/{fileName}")
   @Produces("text/csv")
   public Response getResultsCSV(@PathParam("jobId") String jobId,
                                 @Context HttpServletResponse response,
-                                @QueryParam("fileName") String fileName,
+                                @PathParam("fileName") String fileName,
                                 @QueryParam("columns") final String requestedColumns) {
     try {
 
@@ -228,18 +228,12 @@
         }
       };
 
-      if (fileName == null || fileName.isEmpty()) {
-        fileName = "results.csv";
-      }
-
-      return Response.ok(stream).
-          header("Content-Disposition", String.format("attachment; filename=\"%s\"", fileName)).
-          build();
-
-
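+      // The file name now travels in the URL path, so the Content-Disposition
+      // header (and the "results.csv" fallback) is no longer set here; the
+      // browser derives the download name from the path (assumed intent).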
+      return Response.ok(stream).build();
     } catch (WebApplicationException ex) {
+      LOG.error("Error occurred while downloading result with fileName : {}", fileName ,ex);
       throw ex;
     }  catch (Throwable ex) {
+      LOG.error("Error occurred while downloading result with fileName : {}", fileName ,ex);
       throw new ServiceFormattedException(ex.getMessage(), ex);
     }
   }
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
index 816d0e0..a5222ce 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
@@ -33,6 +33,7 @@
 import org.slf4j.LoggerFactory;
 
 import javax.inject.Inject;
+import javax.ws.rs.core.Response;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -45,7 +46,10 @@
  */
 public class RangerService {
 
+  public static final String RANGER_HIVE_AUTHORIZER_FACTORY_CLASSNAME = "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory";
   private static final String RANGER_CONFIG_URL = "/api/v1/clusters/%s/configurations/service_config_versions?service_name=RANGER&is_current=true";
+  public static final String HIVESERVER2_SITE = "hiveserver2-site";
+  public static final String AUTHORIZATION_MANAGER_KEY = "hive.security.authorization.manager";
 
   protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
@@ -74,6 +78,12 @@
   }
 
   private List<Policy> getPoliciesFromAmbariCluster(String database, String table) {
+
+    if (!isHiveRangerPluginEnabled()) {
+      LOG.error("Ranger authorization is not enabled for Hive");
+      throw new RangerException("Ranger authorization is not enabled for Hive", "CONFIGURATION_ERROR", 500);
+    }
+
     String rangerUrl = null;
     try {
       rangerUrl = getRangerUrlFromAmbari();
@@ -119,7 +129,9 @@
     if (parsedResult instanceof JSONObject) {
       JSONObject obj = (JSONObject) parsedResult;
       LOG.error("Bad response from Ranger: {}", rangerResponse);
-      throw new RangerException((String)obj.get("msgDesc"), "RANGER_ERROR", ((Long)obj.get("statusCode")).intValue());
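+      // Remap Ranger's 401 to 403 so it is not mistaken upstream for an
+      // expired Ambari session (assumed intent of this translation).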
+      int status = ((Long) obj.get("statusCode")).intValue();
+      status = status == Response.Status.UNAUTHORIZED.getStatusCode() ? Response.Status.FORBIDDEN.getStatusCode() : status;
+      throw new RangerException((String) obj.get("msgDesc"), "RANGER_ERROR", status);
     }
     JSONArray jsonArray = (JSONArray) parsedResult;
     if (jsonArray.size() == 0) {
@@ -143,7 +155,7 @@
     JSONArray policyItems = (JSONArray) policyJson.get("policyItems");
     Policy policy = new Policy(name);
 
-    for(Object item: policyItems) {
+    for (Object item : policyItems) {
       PolicyCondition condition = new PolicyCondition();
       JSONObject policyItem = (JSONObject) item;
       JSONArray usersJson = (JSONArray) policyItem.get("users");
@@ -239,7 +251,7 @@
 
   private RangerCred getRangerCredFromConfig() {
     return new RangerCred(context.getProperties().get("hive.ranger.username"),
-        context.getProperties().get("hive.ranger.password"));
+      context.getProperties().get("hive.ranger.password"));
   }
 
   public String getRangerUrlFromAmbari() throws AmbariHttpException {
@@ -271,6 +283,14 @@
   }
 
   /**
+   * Checks whether the Ranger plugin is enabled for Hive.
+   */
+  private boolean isHiveRangerPluginEnabled() {
+    String authManagerConf = context.getCluster().getConfigurationValue(HIVESERVER2_SITE, AUTHORIZATION_MANAGER_KEY);
+    return !StringUtils.isEmpty(authManagerConf) && authManagerConf.equals(RANGER_HIVE_AUTHORIZER_FACTORY_CLASSNAME);
+  }
+
+  /**
    * POJO class to store the policy information from Ranger
    */
   public static class Policy {
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/uploads/query/InsertFromQueryInput.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/uploads/query/InsertFromQueryInput.java
index 4ff61b4..b74ba9b 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/uploads/query/InsertFromQueryInput.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/uploads/query/InsertFromQueryInput.java
@@ -28,28 +28,38 @@
   private String fromTable;
   private String toDatabase;
   private String toTable;
-  private List<ColumnInfo> header;
+  private List<ColumnInfo> partitionedColumns;
+  private List<ColumnInfo> normalColumns;
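+  // The former single "header" list is now split so the query generator can
+  // treat partitioned and normal columns separately.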
   private Boolean unhexInsert = Boolean.FALSE;
 
   public InsertFromQueryInput() {
   }
 
   public InsertFromQueryInput(String fromDatabase, String fromTable, String toDatabase, String toTable,
-                              List<ColumnInfo> header, Boolean unhexInsert) {
+                              List<ColumnInfo> partitionedColumns, List<ColumnInfo> normalColumns, Boolean unhexInsert) {
     this.fromDatabase = fromDatabase;
     this.fromTable = fromTable;
     this.toDatabase = toDatabase;
     this.toTable = toTable;
-    this.header = header;
+    this.partitionedColumns = partitionedColumns;
+    this.normalColumns = normalColumns;
     this.unhexInsert = unhexInsert;
   }
 
-  public List<ColumnInfo> getHeader() {
-    return header;
+  public List<ColumnInfo> getPartitionedColumns() {
+    return partitionedColumns;
   }
 
-  public void setHeader(List<ColumnInfo> header) {
-    this.header = header;
+  public void setPartitionedColumns(List<ColumnInfo> partitionedColumns) {
+    this.partitionedColumns = partitionedColumns;
+  }
+
+  public List<ColumnInfo> getNormalColumns() {
+    return normalColumns;
+  }
+
+  public void setNormalColumns(List<ColumnInfo> normalColumns) {
+    this.normalColumns = normalColumns;
   }
 
   public Boolean getUnhexInsert() {
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
index 42935fc..696f861 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
@@ -57,7 +57,7 @@
       // In development mode when the UI is served using ember serve the xhr requests are proxied to ambari server
       // by setting the proxyurl parameter in ember serve and for ambari to authenticate the requests, it needs this
       // basic authorization. This is for default admin/admin username/password combination.
-      //headers['Authorization'] = 'Basic YWRtaW46YWRtaW4=';
+      headers['Authorization'] = 'Basic YWRtaW46YWRtaW4=';
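+      // WARNING: this hardcodes the default admin/admin credentials; it should
+      // stay commented out (or be environment-guarded) outside local development.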
       //headers['Authorization'] = 'Basic aGl2ZTpoaXZl';
       //headers['Authorization'] = 'Basic ZGlwYXlhbjpkaXBheWFu';
     }
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
index e40c0ba..8491cf2 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
@@ -73,7 +73,7 @@
   },
 
   downloadAsCsv(jobId, path){
-    let resultUrl = this.urlForFindRecord(jobId, 'job') + "/results/csv/?fileName=" + path + ".csv";
+    let resultUrl = this.urlForFindRecord(jobId, 'job') + "/results/csv/" + path + ".csv";
     return resultUrl;
   }
 
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/create-table.js b/contrib/views/hive20/src/main/resources/ui/app/components/create-table.js
index 322aab5..1e8e673 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/create-table.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/create-table.js
@@ -66,7 +66,8 @@
     }
     if (!(this.checkColumnsExists() &&
       this.checkColumnUniqueness() &&
-      this.validateColumns())) {
+      this.validateColumns() &&
+      this.checkClusteringIfTransactional())) {
       this.selectTab("create.table.columns");
       return false;
     }
@@ -96,11 +97,11 @@
   },
 
   checkColumnsExists() {
-    this.set('hasEmptyColumnsError');
-    this.set('emptyColumnsErrorText');
+    this.set('hasTableConfigurationError');
+    this.set('tableConfigurationErrorText');
     if (this.get('columns.length') === 0) {
-      this.set('hasEmptyColumnsError', true);
-      this.set('emptyColumnsErrorText', 'No columns configured. Add some column definitions.');
+      this.set('hasTableConfigurationError', true);
+      this.set('tableConfigurationErrorText', 'No columns configured. Add some column definitions.');
       return false;
     }
     return true;
@@ -132,6 +133,16 @@
     return true;
   },
 
+  checkClusteringIfTransactional() {
+    let clusteredColumns = this.get('columns').filterBy('isClustered', true);
+    if (this.get('settings.transactional') && clusteredColumns.get('length') === 0) {
+      this.set('hasTableConfigurationError', true);
+      this.set('tableConfigurationErrorText', 'Table is marked as transactional but no clustered column is defined. Add some clustered column definitions.');
+      return false;
+    }
+    return true;
+  },
+
   validateTableProperties() {
     for (let i = 0; i < this.get('properties.length'); i++) {
       let property = this.get('properties').objectAt(i);
@@ -144,9 +155,14 @@
 
   validateNumBuckets() {
     let clusteredColumns = this.get('columns').filterBy('isClustered', true);
-    if(clusteredColumns.get('length') > 0 &&
-      (Ember.isEmpty(this.get('settings.numBuckets')) ||
-      !Helper.isInteger(this.get('settings.numBuckets')))) {
+
+    // True when numBuckets is missing or not an integer.
+    function isNumBucketsMissingOrNotAnInteger(context) {
+      return (Ember.isEmpty(context.get('settings.numBuckets')) ||
+        !Helper.isInteger(context.get('settings.numBuckets')));
+    }
+
+    if (clusteredColumns.get('length') > 0 && isNumBucketsMissingOrNotAnInteger(this)) {
       this.get('settingErrors').pushObject({type: 'numBuckets', error: "Some columns are clustered, Number of buckets are required."});
       return false;
     }
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/jobs-browser.js b/contrib/views/hive20/src/main/resources/ui/app/components/jobs-browser.js
index be2771c..7e24843 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/jobs-browser.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/jobs-browser.js
@@ -24,7 +24,12 @@
   maxEndTime: null,
   statusFilter: null,
   titleFilter: null,
-
+  jobId: {'asc': true},
+  title: {'noSort': true},
+  status: {'noSort': true},
+  dateSubmitted: {'noSort': true},
+  duration: {'noSort': true},
+  sortProp: ['id:desc'],
   sortedJobs: Ember.computed.sort('jobs', function (m1, m2) {
     if (m1.get('dateSubmitted') < m2.get('dateSubmitted')) {
       return 1;
@@ -42,7 +47,7 @@
     }
   }),
 
-  filteredJobs: Ember.computed('titleFilteredJobs', 'statusFilter', function () {
+  filteredJobs: Ember.computed('titleFilteredJobs', 'statusFilter', 'sortProp', function () {
     if (this.get('statusFilter')) {
       return  this.get('titleFilteredJobs').filter((entry) => entry.get('status').toLowerCase() === this.get('statusFilter'));
     } else {
@@ -50,6 +55,8 @@
     }
   }),
 
+  filteredJobsSorted: Ember.computed.sort('filteredJobs', 'sortProp'),
+
   statusCounts: Ember.computed('titleFilteredJobs', function () {
     return this.get('titleFilteredJobs').reduce((acc, item, index) => {
       let status = item.get('status').toLowerCase();
@@ -64,6 +71,13 @@
 
 
   actions: {
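+    // Sets the clicked column's one-hot state object (`sortField` -> {key: true})
+    // and updates `sortProp`, which drives the filteredJobsSorted computed property.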
+    sort(sortProp, sortField, key) {
+      let perm = {};
+      perm[key] = true;
+      this.set(sortField, perm);
+      this.set('sortProp', [sortProp]);
+    },
+
     setDateRange(startDate, endDate) {
       this.sendAction('filterChanged', startDate, endDate);
     },
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/setting-list.js b/contrib/views/hive20/src/main/resources/ui/app/components/setting-list.js
index 72a83a3..c410a2a 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/setting-list.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/setting-list.js
@@ -20,7 +20,16 @@
 
 export default Ember.Component.extend({
   settings: [],
+  settingsConf: {'noSort': true},
+  sortProp: ['id:desc'],
+  settingsSorted: Ember.computed.sort('settings', 'sortProp'),
   actions: {
+    sort(sortProp, sortField, key) {
+      let perm = {};
+      perm[key] = true;
+      this.set(sortField, perm);
+      this.set('sortProp', [sortProp]);
+    },
     addNewSettings() {
       this.sendAction('newSettings');
     }
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/udf-item.js b/contrib/views/hive20/src/main/resources/ui/app/components/udf-item.js
index 6005444..51fd094 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/udf-item.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/udf-item.js
@@ -122,6 +122,7 @@
 
     cancelEditUdf(){
       this.set('expandedEdit', false);
+      this.set('isAddingNewFileResource', false);
     },
 
     saveUDf(name, classname, udfid, udfFileResourceName, udfFileResourcePath){
diff --git a/contrib/views/hive20/src/main/resources/ui/app/controllers/savedqueries.js b/contrib/views/hive20/src/main/resources/ui/app/controllers/savedqueries.js
new file mode 100644
index 0000000..6a8c07c
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/controllers/savedqueries.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+  sortProp: ['id:desc'],
+  sortedSavedQueries: Ember.computed.sort('savedQuerylist', 'sortProp')
+});
diff --git a/contrib/views/hive20/src/main/resources/ui/app/controllers/udfs.js b/contrib/views/hive20/src/main/resources/ui/app/controllers/udfs.js
index dc99fd1..98c4dc8 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/controllers/udfs.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/controllers/udfs.js
@@ -19,4 +19,6 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
+   "sortProp": ['id:desc'],
+   sortedUDF : Ember.computed.sort("udflist", "sortProp")
 });
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table.js b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table.js
index 53055cf..42a9a33 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table.js
@@ -45,8 +45,16 @@
   },
 
   actions: {
-    deleteTable(table) {
-      this.deleteTable(table);
+    deleteTable() {
+      this.deleteTable(this.currentModel);
+    },
+
+    deleteTableWarning(){
+      this.deleteTableWarning();
+    },
+
+    cancelDeleteTableWarning(){
+      this.cancelDeleteTableWarning();
     },
 
     editTable(table) {
@@ -58,7 +66,16 @@
     }
   },
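+  // Route-level helpers toggling the confirmation modal that is shown before
+  // the actual delete request is submitted.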
 
+  deleteTableWarning(){
+    this.controller.set('showDeleteTableWarningModal', true);
+  },
+
+  cancelDeleteTableWarning(){
+    this.controller.set('showDeleteTableWarningModal', false);
+  },
+
   deleteTable(tableInfo) {
+    this.controller.set('showDeleteTableWarningModal', false);
     this.controller.set('showDeleteTableModal', true);
     this.controller.set('deleteTableMessage', 'Submitting request to delete table');
     let databaseModel = this.controllerFor('databases.database').get('model');
@@ -88,6 +105,7 @@
         this.get('logger').danger(`Failed to delete table '${tableInfo.get('table')}'`, this.extractError(error));
         this.controller.set('showDeleteTableModal', true);
       });
   },
 
   _removeTableLocally(database, table) {
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/upload-table.js b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/upload-table.js
index 5fee140..f47d820 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/upload-table.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/upload-table.js
@@ -279,6 +279,7 @@
         column.set("scale");
         column.set("precision");
       }
+      column.set("isPartitioned", false); // remove all partitioning information from temp table.
       return column;
     });
 
@@ -421,7 +422,17 @@
     console.log("insertIntoTable");
     this.pushUploadProgressInfos(this.formatMessage('hive.messages.startingToInsertRows'));
 
-    let headers = tableData.get("tableMeta").columns.map(function(column){
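+    // Split the table's columns into partitioned and normal lists; the
+    // insert-from query needs them separately (normal first, partitions last).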
+    let partitionedColumns = tableData.get("tableMeta").columns.filter(function(column){
+      return column.isPartitioned;
+    }).map(function(column){
+        var header = JSON.parse(JSON.stringify(column));
+        header.type = column.type.label;
+        return header;
+    });
+
+    let normalColumns = tableData.get("tableMeta").columns.filter(function(column){
+      return !column.isPartitioned;
+    }).map(function(column){
         var header = JSON.parse(JSON.stringify(column));
         header.type = column.type.label;
         return header;
@@ -432,7 +443,8 @@
       "fromTable": tableData.get("tempTableMeta").name,
       "toDatabase": tableData.get("database"),
       "toTable": tableData.get("tableMeta").name,
-      "header": headers,
+      "partitionedColumns": partitionedColumns,
+      "normalColumns": normalColumns,
       "unhexInsert": tableData.fileFormatInfo.containsEndlines
     });
   },
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/savedqueries.js b/contrib/views/hive20/src/main/resources/ui/app/routes/savedqueries.js
index e896348..463768e 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/savedqueries.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/savedqueries.js
@@ -34,10 +34,19 @@
 
     controller.set('showDeleteSaveQueryModal', false);
     controller.set('selectedSavedQueryId', null);
+    controller.set('preview', {"noSort":true});
+    controller.set('title', {"noSort":true});
+    controller.set('dataBase', {"noSort":true});
+    controller.set('owner', {"noSort":true});
   },
 
   actions: {
-
+    sort(sortProp, sortField, key) {
+      let perm = {};
+      perm[key] = true;
+      this.get('controller').set(sortField, perm);
+      this.get('controller').set('sortProp', [sortProp]);
+    },
     deleteSavedQuery(){
       let queryId = this.get('controller').get('selectedSavedQueryId');
       let self = this;
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/udfs.js b/contrib/views/hive20/src/main/resources/ui/app/routes/udfs.js
index 68e06a9..2e89793 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/udfs.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/udfs.js
@@ -44,10 +44,18 @@
       fileResourceList.push({'name':'Add New File Resource', 'action':'addNewFileResource'});
       controller.set('fileResourceList', fileResourceList);
     });
+    controller.set('name', {'noSort':true});
+    controller.set('classname', {'noSort':true});
+    controller.set('owner', {'noSort':true});
   },
 
   actions:{
-
+    sort(sortProp, sortField, key) {
+      let perm = {};
+      perm[key] = true;
+      this.get('controller').set(sortField, perm);
+      this.get('controller').set('sortProp', [sortProp]);
+    },
     refreshUdfList(){
       this.get('store').findAll('udf').then((data) => {
         let udfList = [];
diff --git a/contrib/views/hive20/src/main/resources/ui/app/services/auto-refresh.js b/contrib/views/hive20/src/main/resources/ui/app/services/auto-refresh.js
index b3276e7..4a4a4a2 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/services/auto-refresh.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/services/auto-refresh.js
@@ -84,6 +84,7 @@
 
 
     console.log("Starting tables auto refresh for " + databaseName);
+    this.set('tablesRefresherRunningStatus', {});
 
     this.get('tablesRefresherRunningStatus')[databaseName] = {};
     this.get('tablesRefresherRunningStatus')[databaseName]["started"] = true;
diff --git a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
index f4b63c5..83fac86 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
+++ b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
@@ -42,6 +42,9 @@
 
 .top-application-header{
   background-color: #e1e1e0;
+  ul{
+    padding: 0 15px;
+  }
 }
 
 .fa-1-5{
@@ -737,6 +740,7 @@
 }
 
 .worksheet-nav {
+  padding: 0 9px;
   li{
     margin-left:5px;
     a {
@@ -948,7 +952,7 @@
   overflow: auto;
 }
 
-.explain--error {
+.explain--error, .no-results {
   display: flex;
   align-items: center;
   justify-content: center;
@@ -977,3 +981,35 @@
 .CodeMirror-scroll {
   padding-bottom: 20px;
 }
+
+.ellipsis-node {
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+  width: 100px;
+}
+
+.progress {
+  margin-top: 15px;
+}
+
+.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
+  border-bottom-right-radius: 3px;
+  border-top-right-radius: 3px;
+}
+
+.middle-align {
+  vertical-align: middle;
+}
+
+.table-title {
+  padding: 0 15px;
+}
+
+.button-container {
+  margin-top: 10px;
+}
+
+.sort-icon {
+  padding-left: 5px;
+}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/create-table.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/create-table.hbs
index 2ffbd2a..80e05de 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/create-table.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/create-table.hbs
@@ -39,9 +39,9 @@
     {{#each tabs as |tab|}}
       {{#if tab.active}}
         {{#if (eq tab.link "create.table.columns")}}
-          {{#if hasEmptyColumnsError}}
+          {{#if hasTableConfigurationError}}
             <div class="alert alert-danger create-table-error">
-              {{emptyColumnsErrorText}}
+              {{tableConfigurationErrorText}}
             </div>
           {{/if}}
           {{table-columns columns=columns shouldAddBuckets=shouldAddBuckets options=options}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/edit-table.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/edit-table.hbs
index 7a02d3a..25048e7 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/edit-table.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/edit-table.hbs
@@ -26,11 +26,6 @@
       {{#each tabs as |tab|}}
         {{#if tab.active}}
           {{#if (eq tab.link "edit.table.columns")}}
-            {{#if hasEmptyColumnsError}}
-              <div class="alert alert-danger create-table-error">
-                {{emptyColumnsErrorText}}
-              </div>
-            {{/if}}
             {{table-columns columns=columns shouldAddBuckets=shouldAddBuckets editMode=true}}
           {{/if}}
           {{#if (eq tab.link "edit.table.properties")}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/jobs-browser.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/jobs-browser.hbs
index 014691e..d29ca73 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/jobs-browser.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/jobs-browser.hbs
@@ -54,16 +54,46 @@
     <table class="table table-striped">
       <thead>
         <tr>
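+          {{!-- Each sortable header cycles noSort -> desc -> asc through the
+                component's "sort" action; the icon reflects the current state. --}}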
-          <th width="10%">Job Id</th>
-          <th width="30%">Title</th>
-          <th width="10%">status</th>
-          <th width="25%">Start time</th>
-          <th width="20%" >Duration</th>
+          <th width="10%">Job ID
+          <span class="sort-icon">
+            {{#if jobId.noSort}}<i class="fa fa-chevron-right" {{action "sort" "id" "jobId" "desc"}}></i>{{/if}}
+            {{#if jobId.desc}}<i class="fa fa-chevron-down" {{action "sort" "id:desc" "jobId" "asc"}}></i>{{/if}}
+            {{#if jobId.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "jobId" "noSort"}}></i>{{/if}}
+          </span>
+          </th>
+          <th width="30%">Title
+          <span class="sort-icon">
+            {{#if title.noSort}}<i class="fa fa-chevron-right" {{action "sort" "title" "title" "desc"}}></i>{{/if}}
+            {{#if title.desc}}<i class="fa fa-chevron-down" {{action "sort" "title:desc" "title" "asc"}}></i>{{/if}}
+            {{#if title.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "title" "noSort"}}></i>{{/if}}
+          </span>
+          </th>
+          <th width="10%">Status
+          <span class="sort-icon">
+            {{#if status.noSort}}<i class="fa fa-chevron-right" {{action "sort" "status" "status" "desc"}}></i>{{/if}}
+            {{#if status.desc}}<i class="fa fa-chevron-down" {{action "sort" "status:desc" "status" "asc"}}></i>{{/if}}
+            {{#if status.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "status" "noSort"}}></i>{{/if}}
+          </span>
+          </th>
+          <th width="25%">Start time
+          <span class="sort-icon">
+            {{#if dateSubmitted.noSort}}<i class="fa fa-chevron-right" {{action "sort" "dateSubmitted" "dateSubmitted" "desc"}}></i>{{/if}}
+            {{#if dateSubmitted.desc}}<i class="fa fa-chevron-down" {{action "sort" "dateSubmitted:desc" "dateSubmitted" "asc"}}></i>{{/if}}
+            {{#if dateSubmitted.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "dateSubmitted" "noSort"}}></i>{{/if}}
+          </span>
+          </th>
+          <th width="20%" >Duration(in seconds)
+          <span class="sort-icon">
+            {{#if duration.noSort}}<i class="fa fa-chevron-right" {{action "sort" "duration" "duration" "desc"}}></i>{{/if}}
+            {{#if duration.desc}}<i class="fa fa-chevron-down" {{action "sort" "duration:desc" "duration" "asc"}}></i>{{/if}}
+            {{#if duration.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "duration" "noSort"}}></i>{{/if}}
+          </span>
+          </th>
           <th>Action</th>
         </tr>
       </thead>
       <tbody>
-        {{#each filteredJobs as |job| }}
+        {{#each filteredJobsSorted as |job| }}
           {{job-item job=job}}
         {{/each}}
       </tbody>
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/notification-message.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/notification-message.hbs
index c2b32a1..70354d9 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/notification-message.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/notification-message.hbs
@@ -27,7 +27,7 @@
   {{#if displayBody}}
     <div class="panel-body">
       {{#if showStatus}}
-        <p><strong>Status:</strong> {{notification.status}}</p>
+        <div><strong>Status:</strong> {{notification.status}}</div>
         <hr/>
       {{/if}}
       {{#if notification.responseMessage}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/setting-list.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/setting-list.hbs
index c3f9c76..5c91550 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/setting-list.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/setting-list.hbs
@@ -19,13 +19,17 @@
 <table class="table table-bordered table-hover">
   <thead>
   <tr>
-    <th width="40%">KEY</th>
+    <th width="40%">KEY
+      {{#if settingsConf.noSort}}<i class="fa fa-chevron-right" {{action "sort" "key" "settingsConf" "desc"}}></i>{{/if}}
+      {{#if settingsConf.desc}}<i class="fa fa-chevron-down" {{action "sort" "key:desc" "settingsConf" "asc"}}></i>{{/if}}
+      {{#if settingsConf.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "settingsConf" "noSort"}}></i>{{/if}}
+    </th>
     <th width="40%">VALUE</th>
     <th width="20%">ACTIONS</th>
   </tr>
   </thead>
   <tbody>
-  {{#each settings as |setting|}}
+  {{#each settingsSorted as |setting|}}
     {{yield setting}}
   {{/each}}
   <tr class="new-settings text-center">
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-rename-form.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-rename-form.hbs
index b692a9c..827ee53 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-rename-form.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-rename-form.hbs
@@ -17,7 +17,7 @@
 }}
 
 <div class="form-group">
-  <label class="col-sm-4 control-label">New Table Name</label>
+  <label class="col-sm-4 control-label">New table name</label>
   <div class="col-sm-8 {{if error 'has-error'}}">
     {{input value=newTableName class="form-control" placeholder="Table Name"}}
     <span class="help-block">{{errorMessage}}</span>
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-statistics.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-statistics.hbs
index cdcf762..1bcae93 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-statistics.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-statistics.hbs
@@ -19,17 +19,15 @@
 {{#if showStats}}
 <div class="row">
   <div class="alert">
-    <p class="lead">
-      {{fa-icon "line-chart" size=2}}&nbsp;&nbsp;&nbsp;&nbsp; STATISTICS
-      <div class="pull-right">
+    <p>
+      <div class="pull-left">
         <button class="btn btn-success"
           {{action "analyseTable"}}>{{fa-icon "location-arrow"}} {{#if (not tableStatisticsEnabled)}}
           Compute {{else}} Recompute {{/if}}</button>
 
         <label>
           {{input type="checkbox" checked=analyseWithStatistics}}
-          <small>include columns</small>
-        </label>
+        </label>&nbsp;<span>INCLUDE COLUMNS</span>
       </div>
     </p>
 
@@ -44,7 +42,7 @@
       </div>
     {{/if}}
     <div class="stats-section">
-      <p><strong>TABLE STATISTICS</strong></p>
+      <p class="table-title"><strong>TABLE STATISTICS</strong></p>
       <table class="table table-bordered table-hover">
         <thead>
         <tr>
@@ -81,7 +79,7 @@
 </div>
 
 <div class="row stats-section">
-  <p><strong>COLUMNS STATISTICS</strong></p>
+  <p class="table-title"><strong>COLUMNS STATISTICS</strong></p>
   <table class="table table-bordered table-hover">
     <thead>
     <tr>
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-edit.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-edit.hbs
index 24cb495..9565535 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-edit.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-edit.hbs
@@ -18,19 +18,26 @@
 
 <td colspan="4" style="padding: 0;">
   <div class="udf-container">
-  <form class="form-inline">
-    <table class="table" style="background-color: transparent">
-      <tr>
-        <td width="20%" >
-          <label>UDF Name</label><br />
-          {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Name" value=name }}
-        </td>
-        <td width="20%">
-          <label >UDF Class Name</label><br />
-          {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Class Name" value=classname }}
-        </td>
-        <td width="40%">
-          <label >File Resource</label><br />
+
+    <h4>Edit UDF</h4>
+
+    <form class="form-horizontal">
+      <div class="form-group">
+        <label class="col-sm-2 control-label">UDF Name</label>
+        <div class="col-sm-10">
+          {{input type="text" class="form-control" placeholder="UDF Name" value=name }}
+        </div>
+      </div>
+      <div class="form-group">
+        <label class="col-sm-2 control-label">UDF Class Name</label>
+        <div class="col-sm-10">
+          {{input type="text" class="form-control" placeholder="UDF Class Name" value=classname }}
+        </div>
+      </div>
+
+      <div class="form-group">
+        <label class="col-sm-2 control-label">File Resource</label>
+        <div class="col-sm-10">
           {{#unless isAddingNewFileResource}}
             {{#power-select
             options=fileResourceList
@@ -43,23 +50,34 @@
             {{/power-select}}
           {{/unless}}
           {{#if isAddingNewFileResource}}
-            <label class="sr-only">Resource name</label>
-            {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="File Resource Name" value=udfFileResourceName }}
 
-            <label class="sr-only">Resource Path</label>
-            {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="File Resource path" value=udfFileResourcePath }}
+            <div class="form-group">
+              <label class="col-sm-2 control-label">Resource name</label>
+              <div class="col-sm-10">
+                {{input type="text" class="form-control" placeholder="File Resource Name" value=udfFileResourceName }}
+              </div>
+            </div>
+
+            <div class="form-group">
+              <label class="col-sm-2 control-label">Resource Path</label>
+              <div class="col-sm-10">
+                {{input type="text" class="form-control" placeholder="File Resource path" value=udfFileResourcePath }}
+              </div>
+            </div>
+
           {{/if}}
-        </td>
+        </div>
+      </div>
 
-        <td width="20%" align="center">
-          <br />
+      <div class="form-group">
+        <div class="col-sm-offset-2 col-sm-10">
           <button class="btn btn-success" type="button" {{action 'saveUDf' name classname udfid udfFileResourceName udfFileResourcePath}}>Save UDF</button>
           <button class="btn btn-warning" type="button" {{action 'cancelEditUdf'}}>Cancel</button>
-        </td>
+        </div>
+      </div>
+    </form>
 
-      </tr>
-    </table>
-  </form>
+
   </div>
 </td>
 
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-item.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-item.hbs
index 929b91a..e83ee5d 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-item.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-item.hbs
@@ -17,10 +17,10 @@
 }}
 
 <tr>
-  <td> {{udf.name}}</td>
+  <td><a class="hand" {{action "showEditUdf" udf.id udf.fileResource }}>{{udf.name}}</a></td>
   <td>{{udf.classname}} </td>
   <td>{{udf.owner}}</td>
-  <td class="center">
+  <td>
     <a class="hand" {{action "toggleExpandUdf" udf.fileResource }}>{{fa-icon "expand"}}</a>&nbsp;&nbsp;
     <a class="hand" {{action "showEditUdf" udf.id udf.fileResource }}>{{fa-icon "edit"}}</a>&nbsp;&nbsp;
     <a class="hand" {{action "showRemoveUdfModal" udf.id }}>{{fa-icon "remove"}}</a>
@@ -59,9 +59,9 @@
 {{#if showDeleteUdfModal}}
   {{confirm-dialog
   title="Confirm"
-  label="Do You want to delete the UDF?"
+  label="Do you want to delete the UDF?"
   titleIcon="minus"
-  labelIcon="save"
+  labelIcon="remove"
   rejectIcon="times"
   confirmIcon="check"
   closable=false
@@ -71,6 +71,4 @@
   }}
 {{/if}}
 
-
-
 {{yield}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-new.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-new.hbs
index ddf34d7..3cb9295 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-new.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/udf-new.hbs
@@ -16,46 +16,63 @@
 * limitations under the License.
 }}
 
-<form class="form-inline">
-  <table class="table">
-    <tr>
-      <td width="20%" >
-        <label>UDF Name</label><br />
-        {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Name" value=udfName }}
-      </td>
-      <td width="20%">
-        <label >UDF Class Name</label><br />
-        {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Class Name" value=udfClassName }}
-      </td>
-      <td width="40%">
-        <label >File Resource</label><br />
-        {{#unless isAddingNewFileResource}}
-          {{#power-select
-          options=fileResourceList
-          placeholder="File Resource"
-          searchField="name"
-          selected=selectedFileResource
-          searchPlaceholder="Type the paramter"
-          onchange=(action "handleFileResourceChange" ) as |filter| }}
-            <b>{{filter.name}}</b>
-          {{/power-select}}
-        {{/unless}}
-        {{#if isAddingNewFileResource}}
-          <label class="sr-only">Resource name</label>
-          {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0"  placeholder="File Resource Name" value=resourceName }}
+<h4>Add new UDF</h4>
 
-          <label class="sr-only">Resource Path</label>
-          {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0"  placeholder="File Resource path" value=resourcePath }}
-        {{/if}}
-      </td>
+<form class="form-horizontal">
+  <div class="form-group">
+    <label class="col-sm-2 control-label">UDF Name</label>
+    <div class="col-sm-10">
+      {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Name" value=udfName }}
+    </div>
+  </div>
+  <div class="form-group">
+    <label class="col-sm-2 control-label">UDF Class Name</label>
+    <div class="col-sm-10">
+      {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0" placeholder="UDF Class Name" value=udfClassName }}
+    </div>
+  </div>
 
-      <td width="20%" align="center">
-        <br />
-        <button class="btn btn-success" type="button" {{action 'saveUDf'}}>Add UDF</button>
-        <button class="btn btn-warning" type="button" {{action 'cancelSaveUDf'}}>Cancel</button>
-      </td>
-    </tr>
-  </table>
+  <div class="form-group">
+    <label class="col-sm-2 control-label">File Resource</label>
+    <div class="col-sm-10">
+      {{#unless isAddingNewFileResource}}
+        {{#power-select
+        options=fileResourceList
+        placeholder="File Resource"
+        searchField="name"
+        selected=selectedFileResource
+        searchPlaceholder="Type the paramter"
+        onchange=(action "handleFileResourceChange" ) as |filter| }}
+          <b>{{filter.name}}</b>
+        {{/power-select}}
+      {{/unless}}
+      {{#if isAddingNewFileResource}}
+
+        <div class="form-group">
+          <label class="col-sm-2 control-label">Resource name</label>
+          <div class="col-sm-10">
+            {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0"  placeholder="File Resource Name" value=resourceName }}
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label class="col-sm-2 control-label">Resource Path</label>
+          <div class="col-sm-10">
+            {{input type="text" class="form-control mb-2 mr-sm-2 mb-sm-0"  placeholder="File Resource path" value=resourcePath }}
+          </div>
+        </div>
+
+      {{/if}}
+    </div>
+  </div>
+
+  <div class="form-group">
+    <div class="col-sm-offset-2 col-sm-10">
+      <button class="btn btn-success" type="button" {{action 'saveUDf'}}>Add UDF</button>
+      <button class="btn btn-warning" type="button" {{action 'cancelSaveUDf'}}>Cancel</button>
+    </div>
+  </div>
 </form>
 
+
 {{yield}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/visual-explain.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/visual-explain.hbs
index 2edeab2..8445582 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/visual-explain.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/visual-explain.hbs
@@ -16,9 +16,10 @@
 * limitations under the License.
 }}
 
-
-<div>
-  <button class="btn btn-default" title="Expand/Collspse" {{action "expandQueryResultPanel" }} style="position: absolute;top: 10px; right: 10px;">{{fa-icon "expand"}}</button>
+<div class="pull-right button-container">
+  <button class="btn btn-default" type="button" id="zoom_in"> + </button>
+  <button class="btn btn-default" type="button" id="zoom_out"> - </button>
+  <button class="btn btn-default" title="Expand/Collspse" {{action "expandQueryResultPanel" }} >{{fa-icon "expand"}}</button>
 </div>
 
 {{#if isQueryRunning}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases.hbs
index ecedef5..0f365e6 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/databases.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases.hbs
@@ -43,7 +43,7 @@
 {{#if confirmDropDatabase}}
   {{confirm-dialog
     title="Confirm"
-    label="Do You want to delete the database?"
+    label="Do you want to delete the database?"
     titleIcon="minus"
     labelIcon="database"
     rejectIcon="times"
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table.hbs
index 2fe3cfb..d461a95 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table.hbs
@@ -27,7 +27,7 @@
         <ul class="dropdown-menu dropdown-menu-right">
           <li>{{#link-to "databases.database.tables.table.edit" class="text-uppercase"}}{{fa-icon "edit"}} Edit{{/link-to}}</li>
           <li>{{#link-to "databases.database.tables.table.rename" class="text-uppercase"}}{{fa-icon "edit"}} Rename{{/link-to}}</li>
-          <li><a href="#" class="text-uppercase" {{action "deleteTable" model}}>{{fa-icon "trash"}} Delete</a></li>
+          <li><a href="#" class="text-uppercase" {{action "deleteTableWarning" }}>{{fa-icon "trash"}} Delete</a></li>
         </ul>
       </div>
     </div>
@@ -46,6 +46,23 @@
       </div><!-- /.modal-content -->
     {{/modal-dialog}}
   {{/if}}
+
+  {{#if showDeleteTableWarningModal}}
+    {{confirm-dialog
+    title="Confirm"
+    label="Do You want to delete the Table?"
+    titleIcon="minus"
+    labelIcon="save"
+    rejectIcon="times"
+    confirmIcon="check"
+    closable=false
+    confirmClass="success"
+    confirm="deleteTable"
+    reject="cancelDeleteTableWarning"
+    }}
+  {{/if}}
+
   <div class="table-body">
     {{#tabs-pane tabs=tabs inverse= true as |tab|}}
       {{tabs-item tab=tab tabs=tabs}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
index 989cfc3..7b27ba7 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
@@ -51,7 +51,7 @@
         <button class="btn btn-default" {{action "visualExplainQuery" }} disabled={{ worksheet.isQueryRunning}}>{{fa-icon "link"}} Visual Explain</button>
 
         {{#if worksheet.isQueryRunning}}
-          {{fa-icon "spinner fa-1-5" spin=true}}
+          {{fa-icon "spinner fa-1-5 middle-align" spin=true}}
         {{/if}}
 
       </div>
@@ -78,7 +78,7 @@
                  aria-controls={{concat 'db_body_' tableModel.dbname}}>
                 {{ tableModel.dbname }} {{#if (eq tableModel.dbname worksheet.selectedDb)}} {{fa-icon "check"}}  {{/if}}
               </a>
-              <small class="pull-right">Tables({{tableModel.tables.length}})</small>
+              <span class="pull-right">Tables({{tableModel.tables.length}})</span>
             </h4>
           </div>
           <div id={{concat 'db_body_' tableModel.dbname}} class="db-tables collapse
@@ -106,12 +106,15 @@
 </div>
 
 {{#if showWorksheetModal}}
-  {{#modal-dialog translucentOverlay=true clickOutsideToClose=true container-class="modal-dialog  modal-sm"}}
+  {{#modal-dialog translucentOverlay=true clickOutsideToClose=true container-class="modal-dialog"}}
     <div class="modal-content">
       <div class="modal-header">
-        <h4 class="modal-title">Saving worksheet</h4>
+        {{fa-icon "minus fa-lg"}} Confirm
       </div>
       <div class="modal-body">
+
+        <p class="lead">{{fa-icon "save fa-lg"}} Do you want to save the worksheet?</p>
+
         <div class="form-horizontal">
           <div class="form-group">
             <label for="title" class="col-sm-2 control-label">Title</label>
@@ -129,9 +132,8 @@
       </div>
 
       <div class="modal-footer">
-        <button type="button" class="btn btn-default" disabled={{not worksheetTitle}} {{action "saveWorksheetModal"}}>{{fa-icon "check"}} Save</button>
-        <button type="button" class="btn btn-default" {{action "closeWorksheetModal"}}>{{fa-icon "close"}}Cancel
-        </button>
+        <button type="button" class="btn btn-default" {{action "closeWorksheetModal"}}>{{fa-icon "close"}} Reject</button>
+        <button type="button" class="btn btn-success" disabled={{not worksheetTitle}} {{action "saveWorksheetModal"}}>{{fa-icon "check"}} Save</button>
       </div>
     </div>
   {{/modal-dialog}}
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/savedqueries.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/savedqueries.hbs
index c0b5e82..ca533e0 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/savedqueries.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/savedqueries.hbs
@@ -18,43 +18,73 @@
 
 <div class="row jobs-table">
   <div class="col-md-12">
+
+    {{#if sortedSavedQueries.length}}
+
     <table class="table table-striped">
       <thead>
       <tr>
-        <th width="30%">Preview</th>
-        <th width="20%">Title</th>
-        <th width="20%">Database</th>
-        <th width="20%">Owner</th>
+        <th width="30%">Preview
+          <span class="sort-icon">
+            {{#if preview.noSort}}<i class="fa fa-chevron-right" {{action "sort" "shortQuery:asc" "preview" "desc"}}></i>{{/if}}
+            {{#if preview.desc}}<i class="fa fa-chevron-down" {{action "sort" "shortQuery:desc" "preview" "asc"}}></i>{{/if}}
+            {{#if preview.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "preview" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
+        <th width="20%">Title
+          <span class="sort-icon">
+            {{#if title.noSort}}<i class="fa fa-chevron-right" {{action "sort" "title" "title" "desc"}}></i>{{/if}}
+            {{#if title.desc}}<i class="fa fa-chevron-down" {{action "sort" "title:desc" "title" "asc"}}></i>{{/if}}
+            {{#if title.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "title" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
+        <th width="20%">Database
+          <span class="sort-icon">
+            {{#if dataBase.noSort}}<i class="fa fa-chevron-right" {{action "sort" "dataBase" "dataBase" "desc"}}></i>{{/if}}
+            {{#if dataBase.desc}}<i class="fa fa-chevron-down" {{action "sort" "dataBase:desc" "dataBase" "asc"}}></i>{{/if}}
+            {{#if dataBase.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "dataBase" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
+        <th width="20%">Owner
+          <span class="sort-icon">
+            {{#if owner.noSort}}<i class="fa fa-chevron-right" {{action "sort" "owner" "owner" "desc"}}></i>{{/if}}
+            {{#if owner.desc}}<i class="fa fa-chevron-down" {{action "sort" "owner:desc" "owner" "asc"}}></i>{{/if}}
+            {{#if owner.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "owner" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
         <th width="10%">Action</th>
-      </tr>
-      </thead>
-      <tbody>
-      {{#each savedQuerylist as |savedQuery| }}
-        <tr>
-          <td>{{savedQuery.shortQuery}}</td>
-          <td class="break-word">{{savedQuery.title}}</td>
-          <td>{{savedQuery.dataBase}}</td>
-          <td>{{savedQuery.owner}}</td>
-          <td>
-              <div class="dropdown">
-                <a class="dropdown-toggle" id="dropdownMenu1121" data-toggle="dropdown" aria-haspopup="true" aria-expanded="true">{{fa-icon "cog"}}</a>
-                <ul class="dropdown-menu dropdown-menu-right" aria-labelledby="dropdownMenu">
-                  <li><a href="#" {{action "openDeleteSavedQueryModal" savedQuery.id}} class="text-uppercase">{{fa-icon "remove"}} Delete</a></li>
-                  <li><a href="#" {{action "openAsWorksheet" savedQuery }} class="text-uppercase">{{fa-icon "folder-open-o"}} Open as worksheet</a></li>
-                </ul>
-              </div>
-          </td>
         </tr>
-      {{/each}}
-      </tbody>
-    </table>
+        </thead>
+        <tbody>
+        {{#each sortedSavedQueries as |savedQuery| }}
+          <tr>
+            <td><a href="#" {{action "openAsWorksheet" savedQuery }} >{{savedQuery.shortQuery}}</a></td>
+            <td class="break-word">{{savedQuery.title}}</td>
+            <td>{{savedQuery.dataBase}}</td>
+            <td>{{savedQuery.owner}}</td>
+            <td>
+              <a href="#" {{action "openDeleteSavedQueryModal" savedQuery.id}} class="text-uppercase">{{fa-icon "remove"}}</a>
+            </td>
+          </tr>
+        {{/each}}
+        </tbody>
+      </table>
+    {{/if}}
+
+    {{#unless sortedSavedQueries.length}}
+      <div class="no-results">
+        <div>There is no saved query.</div>
+      </div>
+    {{/unless}}
+
+
   </div>
 </div>
 
 {{#if showDeleteSaveQueryModal}}
   {{confirm-dialog
   title="Confirm"
-  label="Do You want to delete the saved query?"
+  label="Do you want to delete the saved query?"
   titleIcon="minus"
   labelIcon="save"
   rejectIcon="times"
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/udfs.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/udfs.hbs
index ef6a01c..c4fe34c 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/udfs.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/udfs.hbs
@@ -19,27 +19,58 @@
 <div class="pull-right">
     {{#link-to 'udfs.new' class="btn btn-sm btn-success"}}{{fa-icon "plus"}} NEW UDF{{/link-to}}
 </div>
+
 <div class="row jobs-table">
   <div class="col-md-12">
-    <table class="table table-striped">
-      <thead>
-      <tr>
-        <th width="20%">UDF Name</th>
-        <th width="20%">UDF Class Name</th>
-        <th width="40%">Owner</th>
+
+    {{#if sortedUDF.length}}
+      <table class="table table-striped">
+        <thead>
+        <tr>
+        <th width="20%">UDF Name
+          <span class="sort-icon">
+            {{#if name.noSort}}<i class="fa fa-chevron-right" {{action "sort" "name:asc" "name" "desc"}}></i>{{/if}}
+            {{#if name.desc}}<i class="fa fa-chevron-down" {{action "sort" "name:desc" "name" "asc"}}></i>{{/if}}
+            {{#if name.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "name" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
+        <th width="20%">UDF Class Name
+          <span class="sort-icon">
+            {{#if classname.noSort}}<i class="fa fa-chevron-right" {{action "sort" "classname:asc" "classname" "desc"}}></i>{{/if}}
+            {{#if classname.desc}}<i class="fa fa-chevron-down" {{action "sort" "classname:desc" "classname" "asc"}}></i>{{/if}}
+            {{#if classname.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "classname" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
+        <th width="40%">Owner
+          <span class="sort-icon">
+            {{#if owner.noSort}}<i class="fa fa-chevron-right" {{action "sort" "owner:asc" "owner" "desc"}}></i>{{/if}}
+            {{#if owner.desc}}<i class="fa fa-chevron-down" {{action "sort" "owner:desc" "owner" "asc"}}></i>{{/if}}
+            {{#if owner.asc}}<i class="fa fa-chevron-up" {{action "sort" "" "owner" "noSort"}}></i>{{/if}}
+          </span>
+        </th>
         <th width="20%" class="center">Action</th>
       </tr>
       </thead>
       <tbody>
-        {{#each udflist as |udf| }}
+        {{#each sortedUDF as |udf| }}
           {{udf-item
           udf=udf
           fileResourceList=fileResourceList
           refreshUdfList='refreshUdfList'
           }}
         {{/each}}
-      </tbody>
-    </table>
+        </tbody>
+      </table>
+    {{/if}}
+
+    {{#unless sortedUDF.length}}
+      <div class="no-results">
+        <div>There is no UDF.</div>
+      </div>
+    {{/unless}}
+
   </div>
 </div>
 
diff --git a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/processor.js b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/processor.js
index 298366f..79d110f 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/processor.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/processor.js
@@ -263,12 +263,11 @@
 
 function findAllOutputOperators(vertices, outputOperatorsList, edges, patterns) {
     vertices.forEach(cEdge => {
-      console.log(cEdge);
       edges.push(cEdge);
-      let outputOperator = cEdge["OutputOperators:"];
-      if(outputOperator) {
-        patterns.push({outputOperator:outputOperator.substring(1, outputOperator.length-1), cEdge:[edges[edges.length-4], edges[edges.length-3], edges[edges.length-2], edges[edges.length-1]]});
-        outputOperatorsList.push({outputOperator:outputOperator.substring(1, outputOperator.length-1), cEdge:edges});
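+      // the explain output now exposes "outputOperator:" as an array of operator names, replacing the old bracketed-string "OutputOperators:"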
+      let outputOperator = cEdge["outputOperator:"];
+      if(outputOperator && outputOperator.length) {
+        patterns.push({outputOperator:outputOperator[0], cEdge:[edges[edges.length-4], edges[edges.length-3], edges[edges.length-2], edges[edges.length-1]]});
+        outputOperatorsList.push({outputOperator:outputOperator[0], cEdge:edges});
       }
       findAllOutputOperators(cEdge._children, outputOperatorsList, edges, patterns);
     });
@@ -304,7 +303,7 @@
       if(cSubChild && cSubChild["OperatorId:"] === patternArray[0]["OperatorId:"]){
         if(cChild._children.length>1){
           cChild._children = [cChild._children[0]];
-          cChild["OutputOperators:"] = patternArray[1]["OutputOperators:"];
+          cChild["outputOperator:"] = patternArray[1]["outputOperator:"];
           newVertex = Object.assign(patternArray[2], {
            "_operator":"Build Bloom Filter",
            "_children":[],
@@ -315,7 +314,7 @@
            ...patternArray[3].groups||[doCloneAndOmit(patternArray[3], ['_groups'])]]
           });
         }
-      } else if(cSubChild && cSubChild["OperatorId:"] === patternArray[3]["OperatorId:"]){
+      } else if(cSubChild && patternArray[patternArray.length-1] && cSubChild["OperatorId:"] === patternArray[patternArray.length-1]["OperatorId:"]){
           cChild._children = newVertex ? [newVertex]:[];
       } else {
         findPatternParent(cChild, patternArray);
diff --git a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
index 78aa39d..c09ad6b 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
@@ -21,10 +21,11 @@
 
 export default function doRender(data, selector, onRequestDetail, draggable) {
 
-  const width = '1570', height = '800';
-
+  const width = '100%', height = '800';
+  var zoomInit = null;
   d3.select(selector).select('*').remove();
   var isSingleReducer = isSingleReducerAvailable(data);
+
   const svg =
     d3.select(selector)
       .append('svg')
@@ -32,28 +33,72 @@
         .attr('height', height);
 
   const container = svg.append('g');
-  const zoom =
-    d3.behavior.zoom()
-      .scale(1/10)
-      .scaleExtent([1 / 10, 1])
-      .on('zoom', () => {
-        container.attr('transform', `translate(${d3.event.translate}) scale(${d3.event.scale})`);
-        draggable.set('zoom' , true);
-      });
+
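+  // wire the #zoom_in / #zoom_out buttons (expected elsewhere on the page) to programmatic zoom steps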
+  d3.selectAll('button').on('click', function() {
+    if (this.id === 'zoom_in') {
+      transition(1.2); // zoom in by 20% per click
+    }
+    if (this.id === 'zoom_out') {
+      transition(0.8); // zoom out by 20% per click
+    }
+  });
+
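+  // multiply the cumulative scale (tracked in zoomInit) by zoomLevel and re-apply the transform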
+  function transition(zoomLevel) {
+
+    zoomInit = zoomInit || 1;
+    let newScale = parseFloat((zoomInit * zoomLevel).toFixed(5));
+
+    if (newScale < 0.2) {
+      newScale = 0.2; // never zoom out below 20%
+    }
+
+    zoomInit = newScale;
+
+    container.transition()
+      .duration(100)
+      .attr("transform", "translate(" + zoom.translate()[0] + "," + zoom.translate()[1] +")scale(" + newScale + ")");
+  }
+
+  const zoom = d3.behavior.zoom()
+      .scale(zoomInit)
+      .on('zoom', zoomed );
+
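+  // custom zoom handler: pan with the gesture but preserve the container's current scale (wheel zoom is disabled below)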
+  function zoomed() {
+    var presentScale = d3.transform(container[0][0].getAttribute('transform')).scale[0] || d3.event.scale ;
+    container.attr('transform', 'translate(' + d3.event.translate + ') scale(' + presentScale + ')');
+    draggable.set('zoom' , true);
+  }
+
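+  // transform captured at dragstart so dragend can restore the pre-drag scale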
+  var currentTransform = null;
 
   const drag = d3.behavior.drag()
-    .on("dragstart", () => {
+    .on("dragstart", (event) => {
       draggable.set('dragstart', true);
       draggable.set('zoom',false);
+
+      let evt = window.event || event;
+      currentTransform = d3.transform(evt.currentTarget.firstElementChild.getAttribute('transform'));
     })
     .on("dragend", () => {
       draggable.set('dragend', true);
+
+      var latestTransformation = d3.transform(container[0][0].getAttribute('transform'));
+      container.transition()
+        .duration(100)
+        .attr("transform", "translate(" + latestTransformation.translate[0] + "," + latestTransformation.translate[1] +")scale(" + currentTransform.scale[0] + ")");
     });
 
     svg
       .call(zoom)
       .call(drag);
 
+    svg
+      .on("mousewheel.zoom", null)
+      .on("DOMMouseScroll.zoom", null) // disable scroll-zoom in older versions of Firefox
+      .on("wheel.zoom", null); // disable scroll-zoom in newer versions of Firefox
+
   const root =
     container
       .selectAll('g.vertex')
@@ -74,13 +119,22 @@
     .enter()
       .insert('path', ':first-child')
     .attr('class', 'edge')
-    .attr('d', d => (navigator.userAgent.toLowerCase().indexOf('firefox') > -1) ? getConnectionPathFF(d, svg, container) : getConnectionPath(d, svg, container));
+    .attr('d', d => (navigator.userAgent.toLowerCase().indexOf('firefox') > -1) ? getConnectionPathFF(d, svg, container, data) : getConnectionPath(d, svg, container, data));
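+  // non-Firefox browsers additionally get arrowheads drawn as separate 'arrow' paths over the edges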
+  if(navigator.userAgent.toLowerCase().indexOf('firefox') === -1) {
+    root.selectAll('path.arrow')
+      .data(data.connections)
+      .enter()
+      .insert('path')
+      .attr('class', 'arrow')
+      .style("stroke-width", function(d) { return Math.sqrt(d.value); })
+      .attr('d', d => getConnectionPathArrow(d, svg, container, data));
+  }
 
   reset(zoom, svg, container);
 
 }
 function isSingleReducerAvailable(data){
-  let reducerCount = data.vertices.filter(function(item){
+  let reducerCount = data.verticesData.filter(function(item){
     return item['_vertex'].indexOf("Reducer") === 0;
   });
   if(reducerCount && reducerCount.length === 1) {
@@ -138,7 +192,8 @@
         .attr('height', d => d._operator === 'Fetch Operator' ? 150 : 55)
         .attr('width', 140)
           .append('xhtml:body')
-        .style('margin', 0)
+        .style('height', d => d._operator === 'Fetch Operator' ? '150px' : 'auto')
+        .style('margin', 0 )
           .html(d => getRenderer(d._operator, isSingleReducer)(d))
         .on('click', d => {
           const vertex = d3.select(Ember.$(d3.select(this).node()).closest('.vertex').get(0)).data()[0];
@@ -159,7 +214,7 @@
             <i class='fa ${getOperatorIcon(d._operator)}' aria-hidden='true'></i>
           </div>
           <div class='operator-body' style='margin-left: 10px;'>
-            <div>${getOperatorLabel(d, isSingleReducer)}</div>
+            <div class="ellipsis-node" title=${getOperatorLabel(d, isSingleReducer)}>${getOperatorLabel(d, isSingleReducer)}</div>
             ${(d['limit:'] && d['limit:'] > -1) ? '<div><span style="font-weight: lighter;">Limit:</span> ' + d['limit:'] + ' </div>' : ''}
           </div>
         </div>
@@ -175,7 +230,7 @@
           <i class='fa ${getOperatorIcon(d._operator)}' aria-hidden='true'></i>
         </div>
         <div class='operator-body' style='margin-left: 10px;'>
-          <div>${getOperatorLabel(d, isSingleReducer)}</div>
+          <div class="ellipsis-node" title=${getOperatorLabel(d, isSingleReducer)}>${getOperatorLabel(d, isSingleReducer)}</div>
           ${stats}
         </div>
       </div>
@@ -303,7 +358,7 @@
     .call( zoom.event );
 }
 
-function getConnectionPathFF(connector, svg, container) {
+function getConnectionPathFF(connector, svg, container, data) {
   const source = container.select(`#${connector._source._uuid}`).node();
   const target = container.select(`#${connector._target._uuid}`).node();
   const rSource = d3.select(source).data()[0];
@@ -312,14 +367,19 @@
   const rTargetVertex = d3.select(Ember.$(target).closest('.vertex').get(0)).data()[0];
 
   const offsetBox = Ember.$(container.node()).children('.vertex').get(0).getBoundingClientRect();
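+  // plans whose nodes carry a "filterExpr:" get no extra offset; otherwise shift left 200px per connection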
+  let connectionComplexity, connection = data.nodes.find((item)=>item["filterExpr:"]);
 
-
+  if(connection){
+    connectionComplexity = 0;
+  } else {
+    connectionComplexity = data.connections.length;
+  }
   const pSource = {
-    x: offsetBox.left - 200 + (rSourceVertex._X + (rSourceVertex._widthOfSelf - (rSource._indexX + 1))) * 200 + 140 / 2,
+    x: offsetBox.left - 200 - 200*connectionComplexity + (rSourceVertex._X + (rSourceVertex._widthOfSelf - (rSource._indexX + 1))) * 200 + 140 / 2,
     y: offsetBox.top + (rSourceVertex._Y + rSource._indexY) * 100 + 55 / 2,
   };
   const pTarget = {
-    x: offsetBox.left - 200 + (rTargetVertex._X + (rTargetVertex._widthOfSelf - (rTarget._indexX + 1))) * 200 + 140 / 2,
+    x: offsetBox.left - 200 - 200*connectionComplexity + (rTargetVertex._X + (rTargetVertex._widthOfSelf - (rTarget._indexX + 1))) * 200 + 140 / 2,
     y: offsetBox.top + (rTargetVertex._Y + rTarget._indexY) * 100 + 55 / 2,
   };
   const path = [
@@ -346,8 +406,119 @@
   }, '');
 }
 
+function getConnectionPathFFArrow(connector, svg, container, data) {
+  const source = container.select(`#${connector._source._uuid}`).node();
+  const target = container.select(`#${connector._target._uuid}`).node();
+  const rSource = d3.select(source).data()[0];
+  const rTarget = d3.select(target).data()[0];
+  const rSourceVertex = d3.select(Ember.$(source).closest('.vertex').get(0)).data()[0];
+  const rTargetVertex = d3.select(Ember.$(target).closest('.vertex').get(0)).data()[0];
 
-function getConnectionPath(connector, svg, container){
+  const offsetBox = Ember.$(container.node()).children('.vertex').get(0).getBoundingClientRect();
+
+  let connectionComplexity, connection = data.nodes.find((item)=>item["filterExpr:"]);
+  if(connection){
+    connectionComplexity = 0;
+  } else {
+    connectionComplexity = data.connections.length;
+  }
+  const pSource = {
+    x: offsetBox.left - 200 - 200*connectionComplexity + (rSourceVertex._X + (rSourceVertex._widthOfSelf - (rSource._indexX + 1))) * 200 + 140 / 2,
+    y: offsetBox.top + (rSourceVertex._Y + rSource._indexY) * 100 + 55 / 2,
+  };
+  const pTarget = {
+    x: offsetBox.left - 200 - 200*connectionComplexity + (rTargetVertex._X + (rTargetVertex._widthOfSelf - (rTarget._indexX + 1))) * 200 + 140 / 2,
+    y: offsetBox.top + (rTargetVertex._Y + rTarget._indexY) * 100 + 55 / 2,
+  };
+  const path = [
+    pTarget
+  ];
+  const junctionXMultiplier = (pTarget.x - pSource.x < 0) ? +1 : -1;
+  if(pSource.y !== pTarget.y) {
+    path.push({
+      x: pTarget.x + junctionXMultiplier * 90,
+      y: pTarget.y
+    }, {
+      x: pTarget.x + junctionXMultiplier * 90,
+      y: pSource.y
+    });
+  }
+  path.push(pSource);
+  const offsetY = svg.node().getBoundingClientRect().top;
+  return path.reduce((accumulator, cPoint, index) => {
+    if(path.length === 2){
+      if(index === 0) {
+        if(cPoint.x > 0){
+          return accumulator + `M ${cPoint.x + 60}, ${cPoint.y - offsetY+10} V 10, ${(cPoint.x+200)%(cPoint.x) === 0 ? 40:100}  L ${cPoint.x + 50}, ${cPoint.y - offsetY} Z\n`;
+        } else {
+          return accumulator + `M ${cPoint.x + 60}, ${cPoint.y - offsetY-10} V 10, ${Math.ceil((cPoint.x))%Math.ceil((cPoint.x+200)) === -0 ? 40:100}  L ${cPoint.x + 50}, ${cPoint.y - offsetY} Z\n`;
+        }
+      } else {
+        return accumulator;
+      }
+    } else {
+      if(index === 0) {
+        if(cPoint.x > 0) {
+          return accumulator + `M ${cPoint.x + 60}, ${cPoint.y - offsetY-10} V 10, ${40} L ${cPoint.x + 50}, ${cPoint.y - offsetY} Z\n`;
+        } else {
+          return accumulator + `M ${cPoint.x + 60}, ${cPoint.y - offsetY+10} V 10, ${40} L ${cPoint.x + 50}, ${cPoint.y - offsetY} Z\n`;
+        }
+      } else {
+        return accumulator;
+      }
+    }
+
+  }, '');
+}
+
+function getConnectionPath(connector, svg, container, data){
+  const operators = container.selectAll('.operator');
+  const source = container.select(`#${connector._source._uuid}`);
+  const target = container.select(`#${connector._target._uuid}`);
+  const rSource = source.node().getBoundingClientRect();
+  const rTarget = target.node().getBoundingClientRect();
+  const pSource = {
+    x: (rSource.left + rSource.right) / 2,
+    y: (rSource.top + rSource.bottom) / 2,
+  };
+  const pTarget = {
+    x: (rTarget.left + rTarget.right) / 2,
+    y: (rTarget.top + rTarget.bottom) / 2,
+  };
+  const path = [
+    pTarget
+  ];
+  const junctionXMultiplier = (pTarget.x - pSource.x < 0) ? +1 : -1;
+  if(pSource.y !== pTarget.y) {
+    path.push({
+      x: pTarget.x + junctionXMultiplier * 90,
+      y: pTarget.y
+    }, {
+      x: pTarget.x + junctionXMultiplier * 90,
+      y: pSource.y
+    });
+  }
+  path.push(pSource);
+  const offsetY = svg.node().getBoundingClientRect().top;
+  let isEdgeReversed = false, edgeReversalVal;
+  return path.reduce((accumulator, cPoint, index) => {
+    if(index === 0) {
+      if(cPoint.x > (cPoint.y - offsetY)) {
+        edgeReversalVal = cPoint.x;
+        isEdgeReversed = true;
+      }
+      return accumulator + `M ${cPoint.x}, ${cPoint.y - offsetY}\n`;
+    } else {
+      if(isEdgeReversed && path.length === 4 && index !== path.length-1 && edgeReversalVal > cPoint.x){
+        return accumulator + `L ${cPoint.x+150}, ${cPoint.y - offsetY}\n`;
+      }
+      return accumulator + `L ${cPoint.x}, ${cPoint.y - offsetY}\n`;
+    }
+  }, '');
+}
+
+function getConnectionPathArrow(connector, svg, container){
+
   const operators = container.selectAll('.operator');
   const source = container.select(`#${connector._source._uuid}`);
   const target = container.select(`#${connector._target._uuid}`);
@@ -377,12 +548,29 @@
   path.push(pSource);
   const offsetY = svg.node().getBoundingClientRect().top;
   return path.reduce((accumulator, cPoint, index) => {
-    if(index === 0) {
-      return accumulator + `M ${cPoint.x}, ${cPoint.y - offsetY}\n`;
+    if(path.length === 2){
+      if(index === 0) {
+        // both x-sign branches produced the same path, so the check is dropped
+        return accumulator + `M ${cPoint.x + 45}, ${cPoint.y - offsetY-7} V 0, ${(((cPoint.y - offsetY-15)/100)*100)+23}  L ${cPoint.x + 40}, ${cPoint.y - offsetY} Z\n`;
+      } else {
+        return accumulator;
+      }
     } else {
-      return accumulator + `L ${cPoint.x}, ${cPoint.y - offsetY}\n`;
+      if(index === 0) {
+        if(cPoint.x > 0) {
+          return accumulator + `M ${cPoint.x + 45}, ${cPoint.y - offsetY-7} V 0, ${(((cPoint.y - offsetY-15)/100)*100)+23} L ${cPoint.x + 40}, ${cPoint.y - offsetY} Z\n`;
+        } else {
+          return accumulator + `M ${cPoint.x + 45}, ${cPoint.y - offsetY+9} V 0, 67 L ${cPoint.x + 40}, ${cPoint.y - offsetY} Z\n`;
+        }
+      } else {
+        return accumulator;
+      }
     }
   }, '');
+
 }
 
 function doClean(node) {
diff --git a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/transformer.js b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/transformer.js
index 57fff1e..d786d0f 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/transformer.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/transformer.js
@@ -28,7 +28,7 @@
     getFetchVertex(fetch),
   ];
 
-  let edges = [];
+  let edges = [], verticesData;
   if(tez) {
     edges = getEdges(tez, vertices);
     edges = getEdgesWithCorrectedUnion(edges);
@@ -38,7 +38,7 @@
   vertices = doEnhance(vertices);
 
   vertices = getProcessedVertices(vertices, edges);
-
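+  // snapshot the processed vertices before adjustment; the renderer reads verticesData to detect single-reducer plans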
+  verticesData = vertices;
   const {adjustedVertices, adjustedEdges} = getAdjustedVerticesAndEdges(vertices, edges);
   vertices = adjustedVertices;
   edges = adjustedEdges;
@@ -60,6 +60,7 @@
     tree,
     nodes,
     connections,
+    verticesData
   });
 }
 
@@ -506,10 +507,10 @@
 }
 
 function findAllOperatorsInSourceVertex(node, resultsAggregator, srcNode) {
-  let outputOperator = node["OutputOperators:"];
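+  // "outputOperator:" is an array here too (see processor.js), so the first entry is used directly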
+  let outputOperator = node["outputOperator:"];
   if(outputOperator) {
-    resultsAggregator.push(outputOperator.substring(1, outputOperator.length-1));
-    srcNode[outputOperator.substring(1, outputOperator.length-1)] = node;
+    resultsAggregator.push(outputOperator[0]);
+    srcNode[outputOperator[0]] = node;
   }
   node._children.forEach(cChild => findAllOperatorsInSourceVertex(cChild, resultsAggregator, srcNode));
   if(!node._children) {
diff --git a/contrib/views/hive20/src/main/resources/ui/package.json b/contrib/views/hive20/src/main/resources/ui/package.json
index 0453a52..a409111 100644
--- a/contrib/views/hive20/src/main/resources/ui/package.json
+++ b/contrib/views/hive20/src/main/resources/ui/package.json
@@ -11,7 +11,7 @@
     "build": "ember build",
     "start": "ember server",
     "test": "ember test",
-    "preinstall": "chmod +x node/node_modules/npm/bin/node-gyp-bin/node-gyp",
+    "preinstall": "",
     "postinstall": "node node_modules/.bin/bower --allow-root install"
   },
   "repository": "",
diff --git a/contrib/views/hive20/src/main/resources/ui/yarn.lock b/contrib/views/hive20/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..477a15c
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/yarn.lock
@@ -0,0 +1,6032 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.5.tgz#76962dac876ed3311b05d29c6a58c14e1ef3304b"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amd-name-resolver@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.6.tgz#d3e4ba2dfcaab1d820c1be9de947c67828cfe595"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-escapes@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-1.4.0.tgz#d3a8a83b319aa67793662b13e761c7911422306e"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-find-index@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-to-error@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-to-error/-/array-to-error-1.1.1.tgz#d68812926d14097a205579a667eeaf1856a44c07"
+  dependencies:
+    array-to-sentence "^1.1.0"
+
+array-to-sentence@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/array-to-sentence/-/array-to-sentence-1.1.0.tgz#c804956dafa53232495b205a9452753a258d39fc"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-foreach@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@^1.4.0, async@^1.5.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-code-frame@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4"
+  dependencies:
+    chalk "^1.1.0"
+    esutils "^2.0.2"
+    js-tokens "^3.0.0"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-core@^6.14.0, babel-core@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.24.1.tgz#8c428564dce1e1f41fb337ec34f4c3b022b5ad83"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-generator "^6.24.1"
+    babel-helpers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-register "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    convert-source-map "^1.1.0"
+    debug "^2.1.1"
+    json5 "^0.5.0"
+    lodash "^4.2.0"
+    minimatch "^3.0.2"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+
+babel-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.24.1.tgz#e715f486c58ded25649d888944d52aa07c5d9497"
+  dependencies:
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    detect-indent "^4.0.0"
+    jsesc "^1.3.0"
+    lodash "^4.2.0"
+    source-map "^0.5.0"
+    trim-right "^1.0.1"
+
+babel-helper-builder-binary-assignment-operator-visitor@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz#cce4517ada356f4220bcae8a02c2b346f9a56664"
+  dependencies:
+    babel-helper-explode-assignable-expression "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-call-delegate@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-define-map@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-explode-assignable-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz#f25b82cf7dc10433c55f70592d5746400ac22caa"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9"
+  dependencies:
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-get-function-arity@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-hoist-variables@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-optimise-call-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-remap-async-to-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz#5ec581827ad723fecdd381f1c928390676e4551b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-replace-supers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a"
+  dependencies:
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helpers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-messages@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-check-es2015-constants@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-debug-macros@^0.1.6:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-debug-macros/-/babel-plugin-debug-macros-0.1.7.tgz#69f5a3dc7d72f781354f18c611a3b007bb223511"
+  dependencies:
+    semver "^5.3.0"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-feature-flags@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-feature-flags/-/babel-plugin-feature-flags-0.3.1.tgz#9c827cf9a4eb9a19f725ccb239e85cab02036fc1"
+
+babel-plugin-filter-imports@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-0.3.1.tgz#e7859b56886b175dd2616425d277b219e209ea8b"
+
+babel-plugin-htmlbars-inline-precompile@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.1.0.tgz#b784723bd1f108796b56faf9f1c05eb5ca442983"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-syntax-async-functions@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95"
+
+babel-plugin-syntax-exponentiation-operator@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz#9ee7e8337290da95288201a6a57f4170317830de"
+
+babel-plugin-syntax-trailing-function-commas@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz#ba0360937f8d06e40180a43fe0d5616fff532cf3"
+
+babel-plugin-transform-async-to-generator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz#6536e378aff6cb1d5517ac0e40eb3e9fc8d08761"
+  dependencies:
+    babel-helper-remap-async-to-generator "^6.24.1"
+    babel-plugin-syntax-async-functions "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-arrow-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoping@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-plugin-transform-es2015-classes@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db"
+  dependencies:
+    babel-helper-define-map "^6.24.1"
+    babel-helper-function-name "^6.24.1"
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-helper-replace-supers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-computed-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-destructuring@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-duplicate-keys@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-for-of@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-function-name@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-modules-amd@^6.22.0, babel-plugin-transform-es2015-modules-amd@^6.24.0, babel-plugin-transform-es2015-modules-amd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154"
+  dependencies:
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-commonjs@^6.23.0, babel-plugin-transform-es2015-modules-commonjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe"
+  dependencies:
+    babel-plugin-transform-strict-mode "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-modules-systemjs@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-umd@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468"
+  dependencies:
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-object-super@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d"
+  dependencies:
+    babel-helper-replace-supers "^6.24.1"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-parameters@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b"
+  dependencies:
+    babel-helper-call-delegate "^6.24.1"
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-shorthand-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-spread@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-sticky-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-template-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-typeof-symbol@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-unicode-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    regexpu-core "^2.0.0"
+
+babel-plugin-transform-exponentiation-operator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz#2ab0c9c7f3098fa48907772bb813fe41e8de3a0e"
+  dependencies:
+    babel-helper-builder-binary-assignment-operator-visitor "^6.24.1"
+    babel-plugin-syntax-exponentiation-operator "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-regenerator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418"
+  dependencies:
+    regenerator-transform "0.9.11"
+
+babel-plugin-transform-strict-mode@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babel-polyfill@^6.16.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.23.0.tgz#8364ca62df8eafb830499f699177466c3b03499d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-preset-env@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/babel-preset-env/-/babel-preset-env-1.4.0.tgz#c8e02a3bcc7792f23cded68e0355b9d4c28f0f7a"
+  dependencies:
+    babel-plugin-check-es2015-constants "^6.22.0"
+    babel-plugin-syntax-trailing-function-commas "^6.22.0"
+    babel-plugin-transform-async-to-generator "^6.22.0"
+    babel-plugin-transform-es2015-arrow-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoped-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoping "^6.23.0"
+    babel-plugin-transform-es2015-classes "^6.23.0"
+    babel-plugin-transform-es2015-computed-properties "^6.22.0"
+    babel-plugin-transform-es2015-destructuring "^6.23.0"
+    babel-plugin-transform-es2015-duplicate-keys "^6.22.0"
+    babel-plugin-transform-es2015-for-of "^6.23.0"
+    babel-plugin-transform-es2015-function-name "^6.22.0"
+    babel-plugin-transform-es2015-literals "^6.22.0"
+    babel-plugin-transform-es2015-modules-amd "^6.22.0"
+    babel-plugin-transform-es2015-modules-commonjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-systemjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-umd "^6.23.0"
+    babel-plugin-transform-es2015-object-super "^6.22.0"
+    babel-plugin-transform-es2015-parameters "^6.23.0"
+    babel-plugin-transform-es2015-shorthand-properties "^6.22.0"
+    babel-plugin-transform-es2015-spread "^6.22.0"
+    babel-plugin-transform-es2015-sticky-regex "^6.22.0"
+    babel-plugin-transform-es2015-template-literals "^6.22.0"
+    babel-plugin-transform-es2015-typeof-symbol "^6.23.0"
+    babel-plugin-transform-es2015-unicode-regex "^6.22.0"
+    babel-plugin-transform-exponentiation-operator "^6.22.0"
+    babel-plugin-transform-regenerator "^6.22.0"
+    browserslist "^1.4.0"
+    invariant "^2.2.2"
+
+babel-register@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
+  dependencies:
+    babel-core "^6.24.1"
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    home-or-tmp "^2.0.0"
+    lodash "^4.2.0"
+    mkdirp "^0.5.1"
+    source-map-support "^0.4.2"
+
+babel-runtime@^6.18.0, babel-runtime@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b"
+  dependencies:
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-template@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.24.1.tgz#04ae514f1f93b3a2537f2a0f60a5a45fb8308333"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    lodash "^4.2.0"
+
+babel-traverse@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.24.1.tgz#ab36673fd356f9a0948659e7b338d5feadb31695"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    babylon "^6.15.0"
+    debug "^2.2.0"
+    globals "^9.0.0"
+    invariant "^2.2.0"
+    lodash "^4.2.0"
+
+babel-types@^6.19.0, babel-types@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.24.1.tgz#a136879dc15b3606bda0d90c1fc74304c2ff0975"
+  dependencies:
+    babel-runtime "^6.22.0"
+    esutils "^2.0.2"
+    lodash "^4.2.0"
+    to-fast-properties "^1.0.1"
+
+babel6-plugin-strip-class-callcheck@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-class-callcheck/-/babel6-plugin-strip-class-callcheck-6.0.0.tgz#de841c1abebbd39f78de0affb2c9a52ee228fddf"
+
+babel6-plugin-strip-heimdall@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-heimdall/-/babel6-plugin-strip-heimdall-6.0.1.tgz#35f80eddec1f7fffdc009811dfbd46d9965072b6"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+babylon@^6.11.0, babylon@^6.15.0:
+  version "6.17.0"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.0.tgz#37da948878488b9c4e3c4038893fa3314b3fc932"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.1.2.tgz#fdca871a99713aa00d19e3bbba41c44787a65398"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.9:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.1.1, bluebird@^3.4.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bootstrap-daterangepicker@2.1.24:
+  version "2.1.24"
+  resolved "https://registry.yarnpkg.com/bootstrap-daterangepicker/-/bootstrap-daterangepicker-2.1.24.tgz#a8499c726f229cea445c57f6d476e549723709e5"
+  dependencies:
+    jquery ">=1.10"
+    moment "^2.9.0"
+
+bootstrap-sass@^3.3.6:
+  version "3.3.7"
+  resolved "https://registry.yarnpkg.com/bootstrap-sass/-/bootstrap-sass-3.3.7.tgz#6596c7ab40f6637393323ab0bc80d064fc630498"
+
+bower-config@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-1.4.0.tgz#16c38c1135f8071c19f25938d61b0d8cbf18d3f1"
+  dependencies:
+    graceful-fs "^4.1.3"
+    mout "^1.0.0"
+    optimist "^0.6.1"
+    osenv "^0.1.3"
+    untildify "^2.1.0"
+
+bower-endpoint-parser@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower@^1.3.12, bower@^1.7.9:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.8.0.tgz#55dbebef0ad9155382d9e9d3e497c1372345b44a"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.4.2:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0, broccoli-babel-transpiler@^5.6.0, broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-babel-transpiler@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-6.0.0.tgz#a52c5404bf36236849da503b011fd41fe64a00a2"
+  dependencies:
+    babel-core "^6.14.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^2.0.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-caching-writer@^3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-3.0.3.tgz#0bd2c96a9738d6a6ab590f07ba35c5157d7db476"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.1"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.3.0"
+
+broccoli-clean-css@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-1.1.0.tgz#9db143d9af7e0ae79c26e3ac5a9bb2d720ea19fa"
+  dependencies:
+    broccoli-persistent-filter "^1.1.6"
+    clean-css-promise "^0.1.0"
+    inline-source-map-comment "^1.0.5"
+    json-stable-stringify "^1.0.0"
+
+broccoli-concat@^2.0.4, broccoli-concat@^2.2.0:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-2.3.8.tgz#590cdcc021bb905b6c121d87c2d1d57df44a2a48"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-stew "^1.3.3"
+    fast-sourcemap-concat "^1.0.1"
+    fs-extra "^0.30.0"
+    lodash.merge "^4.3.0"
+    lodash.omit "^4.1.0"
+    lodash.uniq "^4.2.0"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-file-creator@^1.0.0, broccoli-file-creator@^1.0.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-file-creator/-/broccoli-file-creator-1.1.1.tgz#1b35b67d215abdfadd8d49eeb69493c39e6c3450"
+  dependencies:
+    broccoli-kitchen-sink-helpers "~0.2.0"
+    broccoli-plugin "^1.1.0"
+    broccoli-writer "~0.1.1"
+    mkdirp "^0.5.1"
+    rsvp "~3.0.6"
+    symlink-or-copy "^1.0.1"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel-reducer@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel-reducer/-/broccoli-funnel-reducer-1.0.0.tgz#11365b2a785aec9b17972a36df87eef24c5cc0ea"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1, broccoli-funnel@^1.0.6:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0, broccoli-merge-trees@^1.1.1, broccoli-merge-trees@^1.1.5:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-sass-source-maps@^1.8.0:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/broccoli-sass-source-maps/-/broccoli-sass-source-maps-1.8.1.tgz#115e32be25dc5f1686af1c8d1fa4c4c62749f0b6"
+  dependencies:
+    broccoli-caching-writer "^3.0.3"
+    include-path-searcher "^0.1.0"
+    mkdirp "^0.3.5"
+    node-sass "^3.8.0"
+    object-assign "^2.0.0"
+    rsvp "^3.0.6"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sri-hash@^2.1.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-2.1.2.tgz#bc69905ed7a381ad325cc0d02ded071328ebf3f3"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.0.0, broccoli-stew@^1.0.4, broccoli-stew@^1.3.3:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+browserslist@^1.4.0:
+  version "1.7.7"
+  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9"
+  dependencies:
+    caniuse-db "^1.0.30000639"
+    electron-to-chromium "^1.2.7"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@^1.0.0, buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase-keys@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7"
+  dependencies:
+    camelcase "^2.0.0"
+    map-obj "^1.0.0"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+camelcase@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f"
+
+camelcase@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+caniuse-db@^1.0.30000639:
+  version "1.0.30000664"
+  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000664.tgz#e16316e5fdabb9c7209b2bf0744ffc8a14201f22"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css-promise@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/clean-css-promise/-/clean-css-promise-0.1.1.tgz#43f3d2c8dfcb2bf071481252cd9b76433c08eecb"
+  dependencies:
+    array-to-error "^1.0.0"
+    clean-css "^3.4.5"
+    pinkie-promise "^2.0.0"
+
+clean-css@^3.4.5:
+  version "3.4.25"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.25.tgz#9e9a52d5c1e6bc5123e1b2783fa65fe958946ede"
+  dependencies:
+    commander "2.8.x"
+    source-map "0.4.x"
+
+cli-cursor@^1.0.1, cli-cursor@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-1.0.2.tgz#64da3f7d56a54412e59794bd62dc35295e8f2987"
+  dependencies:
+    restore-cursor "^1.0.1"
+
+cli-spinners@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-0.1.2.tgz#bb764d88e185fb9e1e6a2a1f19772318f605e31c"
+
+cli-table2@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/cli-table2/-/cli-table2-0.2.0.tgz#2d1ef7f218a0e786e214540562d4bd177fe32d97"
+  dependencies:
+    lodash "^3.10.1"
+    string-width "^1.0.1"
+  optionalDependencies:
+    colors "^1.1.2"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli-width@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.1.0.tgz#b234ca209b29ef66fc518d9b98d5847b00edf00a"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+cliui@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wrap-ansi "^2.0.0"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.4:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@2.8.x:
+  version "2.8.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@^1.4.6:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+config-chain@~1.1.10:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-2.1.0.tgz#737a3a7036e9886102aa6099e47bb33ab1aba1a1"
+  dependencies:
+    dot-prop "^3.0.0"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.14.5.tgz#5a25047bc76f73072667c8cb52c989888f494c63"
+  dependencies:
+    bluebird "^3.1.1"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-js@^2.4.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-object@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-1.1.0.tgz#86d63918733cf9da1a5aae729e62c0a88e66ad0a"
+
+core-object@^2.0.2:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-2.1.1.tgz#4b7a5f1edefcb1e6d0dcb58eab1b9f90bfc666a8"
+  dependencies:
+    chalk "^1.1.3"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cross-spawn@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982"
+  dependencies:
+    lru-cache "^4.0.1"
+    which "^1.2.9"
+
+cross-spawn@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449"
+  dependencies:
+    lru-cache "^4.0.1"
+    shebang-command "^1.2.0"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+currently-unhandled@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea"
+  dependencies:
+    array-find-index "^1.0.1"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@2.2.0, debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detect-indent@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208"
+  dependencies:
+    repeating "^2.0.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+diff@^2.2.2:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-2.2.3.tgz#60eafd0d28ee906e4e8ff0a52c1229521033bf99"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+dot-prop@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-3.0.0.tgz#1b708af094a49c9a0e7dbcad790aba539dac1177"
+  dependencies:
+    is-obj "^1.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+electron-to-chromium@^1.2.7:
+  version "1.3.8"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.8.tgz#b2c8a2c79bb89fbbfd3724d9555e15095b5f5fb6"
+
+ember-ajax@^2.0.1:
+  version "2.5.6"
+  resolved "https://registry.yarnpkg.com/ember-ajax/-/ember-ajax-2.5.6.tgz#a75f743ccf1b95e979a5cf96013b3dba8fa625e4"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-basic-dropdown@^0.17.1:
+  version "0.17.4"
+  resolved "https://registry.yarnpkg.com/ember-basic-dropdown/-/ember-basic-dropdown-0.17.4.tgz#1bece739199e3c973c04b6eb0c489c38a75ef3d9"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+    ember-cli-htmlbars "^1.1.1"
+    ember-wormhole "^0.5.1"
+
+ember-cli-app-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.1.tgz#d135eba75f30e791d8a5e5844f1251dcbcc40438"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@5.1.10:
+  version "5.1.10"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.1.10.tgz#d403f178aab602e1337c403c5a58c0200a8969aa"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.0"
+    broccoli-funnel "^1.0.0"
+    clone "^1.0.2"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7, ember-cli-babel@^5.2.1:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.0.0-beta.7:
+  version "6.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-6.1.0.tgz#d9c83a7d0c67cc8a3ccb9bd082971c3593e54fad"
+  dependencies:
+    amd-name-resolver "0.0.6"
+    babel-plugin-debug-macros "^0.1.6"
+    babel-plugin-transform-es2015-modules-amd "^6.24.0"
+    babel-polyfill "^6.16.0"
+    babel-preset-env "^1.2.0"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-source "^1.1.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.2.0"
+
+ember-cli-broccoli@0.16.9:
+  version "0.16.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-broccoli/-/ember-cli-broccoli-0.16.9.tgz#4e9128f59ffaee99705c01e9a44a691a0ae199db"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^4.0.4"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+ember-cli-daterangepicker@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-daterangepicker/-/ember-cli-daterangepicker-0.3.0.tgz#aaba1af90f462b2c9a0f9c2d3c52e9ad23aaaa54"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.1.1"
+    ember-cli-babel "^5.1.5"
+
+ember-cli-dependency-checker@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.4.0.tgz#2b13f977e1eea843fc1a21a001be6ca5d4ef1942"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "^0.2.0"
+    semver "^4.1.0"
+
+ember-cli-file-picker@0.0.10:
+  version "0.0.10"
+  resolved "https://registry.yarnpkg.com/ember-cli-file-picker/-/ember-cli-file-picker-0.0.10.tgz#7018214123c8a0105ffbe2bed639ec1e37adbde2"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-cli-flash@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-flash/-/ember-cli-flash-1.4.0.tgz#0a831b406e98a0f62027e2706cbe581a1bf4f608"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+    ember-runtime-enumerable-includes-polyfill "^1.0.1"
+
+ember-cli-get-component-path-option@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-component-path-option/-/ember-cli-get-component-path-option-1.0.0.tgz#0d7b595559e2f9050abed804f1d8eff1b08bc771"
+
+ember-cli-get-dependency-depth@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-dependency-depth/-/ember-cli-get-dependency-depth-1.0.0.tgz#e0afecf82a2d52f00f28ab468295281aec368d11"
+
+ember-cli-htmlbars-inline-precompile@^0.3.1:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.6.tgz#4095fe423f93102724c0725e4dd1a31f25e24de5"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "^0.1.0"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    hash-for-dep "^1.0.2"
+
+ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.3, ember-cli-htmlbars@^1.0.8, ember-cli-htmlbars@^1.1.0, ember-cli-htmlbars@^1.1.1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.4.0:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-jshint@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/ember-cli-jshint/-/ember-cli-jshint-1.0.5.tgz#8a0185f19cbd7136995ee6ac92941a560e265b91"
+  dependencies:
+    broccoli-jshint "^1.0.0"
+
+ember-cli-legacy-blueprints@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-legacy-blueprints/-/ember-cli-legacy-blueprints-0.1.4.tgz#83d6c005ac0e39750ff9dd45cd1b78cf697150c6"
+  dependencies:
+    chalk "^1.1.1"
+    ember-cli-get-component-path-option "^1.0.0"
+    ember-cli-get-dependency-depth "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-lodash-subset "^1.0.7"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-valid-component-name "^1.0.0"
+    ember-cli-version-checker "^1.1.7"
+    ember-router-generator "^1.0.0"
+    exists-sync "0.0.3"
+    fs-extra "^0.24.0"
+    inflection "^1.7.1"
+    rsvp "^3.0.17"
+    silent-error "^1.0.0"
+
+ember-cli-lodash-subset@^1.0.7:
+  version "1.0.12"
+  resolved "https://registry.yarnpkg.com/ember-cli-lodash-subset/-/ember-cli-lodash-subset-1.0.12.tgz#af2e77eba5dcb0d77f3308d3a6fd7d3450f6e537"
+
+ember-cli-moment-shim@3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-3.0.1.tgz#c67c39f71c1c5a38bcc93bfa65b14e25f069895f"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.1.5"
+    broccoli-stew "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.0.0"
+    exists-sync "0.0.4"
+    lodash.defaults "^4.1.0"
+    moment "^2.13.0"
+    moment-timezone "^0.5.0"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-2.0.0.tgz#45c8b985eba06bb443b3abce1c3c6220fdcb8094"
+  dependencies:
+    broccoli-clean-css "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@^2.0.0:
+  version "2.2.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-2.2.6.tgz#d802174724edd59b61ffdd0581fceb3236014dd1"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-concat "^2.2.0"
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.1.0"
+    ember-cli-babel "^5.1.5"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunit-notifications "^0.1.1"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+    rsvp "^3.2.1"
+
+ember-cli-release@^0.2.9:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.9.tgz#5e8de3d034c65597933748023058470ec1231adb"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sass@5.6.0:
+  version "5.6.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-sass/-/ember-cli-sass-5.6.0.tgz#792de67544bb903eef421a3e59c484840fea5352"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    broccoli-sass-source-maps "^1.8.0"
+    ember-cli-babel "5.1.10"
+    ember-cli-version-checker "^1.0.2"
+    merge "^1.2.0"
+
+ember-cli-sri@^2.1.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-2.1.1.tgz#971620934a4b9183cf7923cc03e178b83aa907fd"
+  dependencies:
+    broccoli-sri-hash "^2.1.0"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-test-loader@^1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-loader/-/ember-cli-test-loader-1.1.1.tgz#333311209b18185d0e0e95f918349da10cacf0b1"
+  dependencies:
+    ember-cli-babel "^5.2.1"
+
+ember-cli-uglify@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-valid-component-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-valid-component-name/-/ember-cli-valid-component-name-1.0.0.tgz#71550ce387e0233065f30b30b1510aa2dfbe87ef"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4, ember-cli-version-checker@^1.1.6, ember-cli-version-checker@^1.1.7, ember-cli-version-checker@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@2.7.0:
+  version "2.7.0"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-2.7.0.tgz#26dd9ab583d987e3b4e6b64b7f0cdfa059610404"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    bower "^1.3.12"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "0.2.2"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-concat "^2.0.4"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.2"
+    broccoli-funnel "^1.0.0"
+    broccoli-funnel-reducer "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-viz "^2.0.1"
+    chalk "^1.1.3"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "^2.0.0"
+    core-object "^2.0.2"
+    debug "^2.1.3"
+    diff "^2.2.2"
+    ember-cli-broccoli "0.16.9"
+    ember-cli-get-component-path-option "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-legacy-blueprints "^0.1.1"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-preprocess-registry "^2.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-try "^0.2.2"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    filesize "^3.1.3"
+    find-up "^1.1.2"
+    fs-extra "0.30.0"
+    fs-monitor-stack "^1.0.2"
+    fs-tree-diff "^0.4.4"
+    get-caller-file "^1.0.0"
+    git-repo-info "^1.0.4"
+    glob "7.0.3"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "^0.12.0"
+    is-git-url "^0.2.0"
+    isbinaryfile "^3.0.0"
+    leek "0.0.21"
+    lodash "^4.12.0"
+    markdown-it "6.0.4"
+    markdown-it-terminal "0.0.3"
+    minimatch "^3.0.0"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.15.5"
+    npm-package-arg "^4.1.1"
+    ora "^0.2.0"
+    portfinder "^1.0.3"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.5"
+    resolve "^1.1.6"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.3"
+    testem "^1.8.1"
+    through "^2.3.6"
+    tiny-lr "0.2.1"
+    tree-sync "^1.1.0"
+    walk-sync "^0.2.6"
+    yam "0.0.19"
+
+ember-composable-helpers@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/ember-composable-helpers/-/ember-composable-helpers-1.1.2.tgz#fc9f540269b1f01dec89be9bdc61d31c8cf41350"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    ember-cli-babel "^5.1.6"
+
+ember-computed-decorators@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/ember-computed-decorators/-/ember-computed-decorators-0.2.2.tgz#7c934a575c55ac3a18b6aaeb7cd2cbe149bc9b34"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-concurrency@^0.7.15:
+  version "0.7.19"
+  resolved "https://registry.yarnpkg.com/ember-concurrency/-/ember-concurrency-0.7.19.tgz#095f2ede1b56ab068958cac5b55e77b9de67e1c6"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+    ember-getowner-polyfill "^1.1.0"
+    ember-maybe-import-regenerator "^0.1.4"
+
+ember-data@^2.7.0:
+  version "2.13.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.13.0.tgz#6d61487129de0e72225cc98bbc0d995e2042a933"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    babel-plugin-feature-flags "^0.3.1"
+    babel-plugin-filter-imports "^0.3.1"
+    babel6-plugin-strip-class-callcheck "^6.0.0"
+    babel6-plugin-strip-heimdall "^6.0.1"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-file-creator "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^6.0.0-beta.7"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    ember-inflector "^2.0.0"
+    ember-runtime-enumerable-includes-polyfill "^2.0.0"
+    exists-sync "0.0.3"
+    git-repo-info "^1.1.2"
+    heimdalljs "^0.3.0"
+    inflection "^1.8.0"
+    npm-git-info "^1.0.0"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+
+ember-export-application-global@^1.0.5:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-factory-for-polyfill@^1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-factory-for-polyfill/-/ember-factory-for-polyfill-1.1.1.tgz#c1124d541a058baaa6681d9611340c16f0baf660"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+    ember-cli-version-checker "^1.2.0"
+
+ember-font-awesome@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ember-font-awesome/-/ember-font-awesome-2.2.0.tgz#8b7b5e4b1b5ff2b865b09331b9bf5506fb0c3c1e"
+  dependencies:
+    chalk "^1.1.3"
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+    ember-computed-decorators "0.2.2"
+
+ember-getowner-polyfill@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-getowner-polyfill/-/ember-getowner-polyfill-1.0.0.tgz#f847ceabd97ab97e9e9d279c382400a4d407a9d6"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-getowner-polyfill@^1.1.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-getowner-polyfill/-/ember-getowner-polyfill-1.2.3.tgz#ea70f4a48b1c05b91056371d1878bbafe018222e"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.2.0"
+    ember-factory-for-polyfill "^1.1.0"
+
+ember-i18n@4.5.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/ember-i18n/-/ember-i18n-4.5.0.tgz#5d40313db72241bbf1fe8f1181d922b818a7af78"
+  dependencies:
+    broccoli-funnel "^1.0.6"
+    ember-cli-babel "^5.0.0"
+    ember-cli-version-checker "^1.1.6"
+    ember-getowner-polyfill "^1.1.0"
+
+ember-inflector@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-2.0.0.tgz#ac0870e87c0724bd42cf5ed7ef166c49a296ecfb"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+
+ember-load-initializers@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-load-initializers/-/ember-load-initializers-0.5.1.tgz#76e3db23c111dbdcd3ae6f687036bf0b56be0cbe"
+
+ember-macro-helpers@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/ember-macro-helpers/-/ember-macro-helpers-0.4.0.tgz#914670001478fcccccb84819aca7630cc44526c8"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+
+ember-maybe-import-regenerator@^0.1.4:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/ember-maybe-import-regenerator/-/ember-maybe-import-regenerator-0.1.6.tgz#35d41828afa6d6a59bc0da3ce47f34c573d776ca"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-babel "^6.0.0-beta.4"
+    regenerator-runtime "^0.9.5"
+
+ember-modal-dialog@0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/ember-modal-dialog/-/ember-modal-dialog-0.9.0.tgz#e2797762f13eea5aa71524c1ec516c150980b287"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.8"
+    ember-cli-version-checker "^1.1.6"
+    ember-wormhole "~0.3.6"
+
+ember-moment@7.2.0:
+  version "7.2.0"
+  resolved "https://registry.yarnpkg.com/ember-moment/-/ember-moment-7.2.0.tgz#73202f5d62d7c645feb3abf580b3801c1beb6efd"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-macro-helpers "^0.4.0"
+
+ember-power-select@1.0.0-beta.31:
+  version "1.0.0-beta.31"
+  resolved "https://registry.yarnpkg.com/ember-power-select/-/ember-power-select-1.0.0-beta.31.tgz#151c48e10541d0f8afbe6e7af4ac4f5bb069d17b"
+  dependencies:
+    ember-basic-dropdown "^0.17.1"
+    ember-cli-babel "^5.1.10"
+    ember-cli-htmlbars "^1.1.0"
+    ember-concurrency "^0.7.15"
+    ember-text-measurer "^0.3.3"
+    ember-truth-helpers "^1.2.0"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@^2.0.3:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.1.1.tgz#5e4c1fffe9f5f48fc2194ad7592274ed0cd74f72"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-responsive@2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-responsive/-/ember-responsive-2.0.0.tgz#100554745a486a75d6bb7d3d8c0b146bdff06363"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-getowner-polyfill "1.0.0"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-runtime-enumerable-includes-polyfill@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/ember-runtime-enumerable-includes-polyfill/-/ember-runtime-enumerable-includes-polyfill-1.0.4.tgz#16a7612e347a2edf07da8b2f2f09dbfee70deba0"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-runtime-enumerable-includes-polyfill@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-runtime-enumerable-includes-polyfill/-/ember-runtime-enumerable-includes-polyfill-2.0.0.tgz#6e9ba118bc909d1d7762de1b03a550d8955308a9"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+    ember-cli-version-checker "^1.1.6"
+
+ember-sass-bootstrap@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-sass-bootstrap/-/ember-sass-bootstrap-0.1.2.tgz#3c26c66b1a219a131979f6c8a6ded6325d4556a7"
+  dependencies:
+    bootstrap-sass "^3.3.6"
+    broccoli-stew "^1.0.4"
+    ember-cli-babel "^5.1.5"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+ember-text-measurer@^0.3.3:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/ember-text-measurer/-/ember-text-measurer-0.3.3.tgz#0762809a71c2e1f2e60ab00c53c6eb1b63c9f963"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-truth-helpers@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-1.3.0.tgz#6ed9f83ce9a49f52bb416d55e227426339a64c60"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-try-config@^2.0.1:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/ember-try-config/-/ember-try-config-2.1.0.tgz#e0e156229a542346a58ee6f6ad605104c98edfe0"
+  dependencies:
+    lodash "^4.6.1"
+    node-fetch "^1.3.3"
+    rsvp "^3.2.1"
+    semver "^5.1.0"
+
+ember-try@^0.2.2:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/ember-try/-/ember-try-0.2.14.tgz#d47e8fa38858d5683e47856e24a260b39e9caf4a"
+  dependencies:
+    chalk "^1.0.0"
+    cli-table2 "^0.2.0"
+    core-object "^1.1.0"
+    debug "^2.2.0"
+    ember-cli-version-checker "^1.1.6"
+    ember-try-config "^2.0.1"
+    extend "^3.0.0"
+    fs-extra "^0.26.0"
+    promise-map-series "^0.2.1"
+    resolve "^1.1.6"
+    rimraf "^2.3.2"
+    rsvp "^3.0.17"
+    semver "^5.1.0"
+    sync-exec "^0.6.2"
+
+ember-uploader@1.2.3:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-uploader/-/ember-uploader-1.2.3.tgz#7e8bf0d8c2d1f4298d8c78189ecb11a8d0ad49dc"
+  dependencies:
+    broccoli-file-creator "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-babel "^5.1.6"
+
+ember-wormhole@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-wormhole/-/ember-wormhole-0.5.1.tgz#f2a6fff13b1c037ffa83b2c9291d8b5978878e5b"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+
+ember-wormhole@~0.3.6:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-wormhole/-/ember-wormhole-0.3.6.tgz#bbe21bb5478ad254efe4fff4019ac6710f4ad85c"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+encoding@^0.1.11:
+  version "0.1.12"
+  resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.12.tgz#538b66f3ee62cd1ab51ec323829d1f9480c74beb"
+  dependencies:
+    iconv-lite "~0.4.13"
+
+engine.io-client@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.0.tgz#7b730e4127414087596d9be3c88d2bc5fdb6cf5c"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.1"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.1.tgz#9554f1ae33107d6fbd170ca5466d2f833f6a07cf"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.0.tgz#3eeb5f264cb75dbbec1baaea26d61f5a4eace2aa"
+  dependencies:
+    accepts "1.3.3"
+    base64id "0.1.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    ws "1.1.1"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.14:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3, escape-string-regexp@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esutils@^2.0.0, esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit-hook@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/exit-hook/-/exit-hook-1.1.1.tgz#f05ca233b48c05d54fff07765df8507e95c02ff8"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@^3.0.0, extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+fast-sourcemap-concat@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-1.1.0.tgz#a800767abed5eda02e67238ec063a709be61f9d4"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    fs-extra "^0.30.0"
+    memory-streams "^0.1.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+figures@^1.3.5:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e"
+  dependencies:
+    escape-string-regexp "^1.0.5"
+    object-assign "^4.1.0"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+filesize@^3.1.3:
+  version "3.5.6"
+  resolved "https://registry.yarnpkg.com/filesize/-/filesize-3.5.6.tgz#5fd98f3eac94ec9516ef8ed5782fad84a01a0a1a"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+find-up@^1.0.0, find-up@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f"
+  dependencies:
+    path-exists "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.7.0:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.7.1.tgz#ccf20f7941f108883fcddb99383dbe6e1861c758"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash.debounce "^3.1.1"
+    lodash.flatten "^3.0.2"
+    minimatch "^3.0.2"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.30.0, fs-extra@^0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.26.0, fs-extra@^0.26.6:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4, fs-tree-diff@^0.5.6:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.9:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@~1.0.8:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+gaze@^1.0.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/gaze/-/gaze-1.1.2.tgz#847224677adb8870d679257ed3388fdb61e40105"
+  dependencies:
+    globule "^1.0.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.0, get-caller-file@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4, git-repo-info@^1.1.2:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@7.0.3, glob@~7.0.3:
+  version "7.0.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.3.tgz#0aa235931a4a96ac13d60ffac2fb877bd6ed4f58"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^5.0.10, glob@^5.0.15, glob@~5.0.0:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.0, glob@^7.0.3, glob@^7.0.4, glob@^7.0.5, glob@^7.1.1, glob@~7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globals@^9.0.0:
+  version "9.17.0"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-9.17.0.tgz#0c0ca696d9b9bb694d2e5470bd37777caad50286"
+
+globule@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/globule/-/globule-1.1.0.tgz#c49352e4dc183d85893ee825385eb994bb6df45f"
+  dependencies:
+    glob "~7.1.1"
+    lodash "~4.16.4"
+    minimatch "~3.0.2"
+
+graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.4:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growly@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
+
+handlebars@^4.0.4:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+heimdalljs@^0.3.0:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.3.3.tgz#e92d2c6f77fd46d5bf50b610d28ad31755054d0b"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+home-or-tmp@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@^2.1.5, hosted-git-info@~2.1.4:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
+  dependencies:
+    inherits "~2.0.1"
+    statuses "1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13, iconv-lite@^0.4.5, iconv-lite@~0.4.13:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iferr@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+in-publish@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51"
+
+include-path-searcher@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/include-path-searcher/-/include-path-searcher-0.1.0.tgz#c0cf2ddfa164fb2eae07bc7ca43a7f191cb4d7bd"
+
+indent-string@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80"
+  dependencies:
+    repeating "^2.0.0"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0, inflection@^1.7.1, inflection@^1.8.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@^1.3.4, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.3:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inline-source-map-comment@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/inline-source-map-comment/-/inline-source-map-comment-1.0.5.tgz#50a8a44c2a790dfac441b5c94eccd5462635faf6"
+  dependencies:
+    chalk "^1.0.0"
+    get-stdin "^4.0.1"
+    minimist "^1.1.1"
+    sum-up "^1.0.1"
+    xtend "^4.0.0"
+
+inquirer@^0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.12.0.tgz#1ef2bfd63504df0bc75785fff8c2c41df12f077e"
+  dependencies:
+    ansi-escapes "^1.1.0"
+    ansi-regex "^2.0.0"
+    chalk "^1.0.0"
+    cli-cursor "^1.0.1"
+    cli-width "^2.0.0"
+    figures "^1.3.5"
+    lodash "^4.3.0"
+    readline2 "^1.0.1"
+    run-async "^0.1.0"
+    rx-lite "^3.1.2"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.0"
+    through "^2.3.6"
+
+invariant@^2.2.0, invariant@^2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360"
+  dependencies:
+    loose-envify "^1.0.0"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-git-url@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/is-git-url/-/is-git-url-0.2.3.tgz#445200d6fbd6da028fb5e01440d9afc93f3ccb64"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-obj@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+jquery@>=1.10:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.2.1.tgz#5c4d9de652af6cd0a770154a631bba12b015c787"
+
+js-base64@^2.1.8:
+  version "2.1.9"
+  resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.1.9.tgz#f0e80ae039a4bd654b5f281fc93f04a914a7fcce"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-tokens@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7"
+
+js-yaml@^3.2.5, js-yaml@^3.2.7:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+json5@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
+
+jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klassy@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/klassy/-/klassy-0.1.3.tgz#c31d5756d583197d75f582b6e692872be497067f"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.21:
+  version "0.0.21"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.21.tgz#09804bf70f8aefbba745f5d56d2a4debf22711ff"
+  dependencies:
+    debug "^2.1.0"
+    lodash.assign "^3.2.0"
+    request "^2.27.0"
+    rsvp "^3.0.21"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.2.0, linkify-it@~1.2.2:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.2.4.tgz#0773526c317c8fd13bd534ee1d180ff88abf881a"
+  dependencies:
+    uc.micro "^1.0.1"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+load-json-file@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    parse-json "^2.2.0"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+    strip-bom "^2.0.0"
+
+loader.js@^4.0.1:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/loader.js/-/loader.js-4.3.0.tgz#736c13eb8afdf75abd6c2d7b4f7fd40e1105a71f"
+
+lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._baseassign@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz#8c38a099500f215ad09e59f1722fd0c52bfe0a4e"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._baseflatten@^3.0.0:
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/lodash._baseflatten/-/lodash._baseflatten-3.1.4.tgz#0770ff80131af6e34f3b511796a7ba5214e65ff7"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-3.2.0.tgz#3ce9f0234b4b2223e296b8fa0ac1fee8ebca64fa"
+  dependencies:
+    lodash._baseassign "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash.assign@^4.2.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7"
+
+lodash.assignin@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2"
+
+lodash.clonedeep@^4.3.2, lodash.clonedeep@^4.4.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.debounce@^3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-3.1.1.tgz#812211c378a94cc29d5aa4e3346cf0bfce3a7df5"
+  dependencies:
+    lodash._getnative "^3.0.0"
+
+lodash.defaults@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-4.2.0.tgz#d09178716ffea4dde9e5fb7b37f6f0802274580c"
+
+lodash.find@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.find/-/lodash.find-4.6.0.tgz#cb0704d47ab71789ffa0de8b97dd926fb88b13b1"
+
+lodash.flatten@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-3.0.2.tgz#de1cf57758f8f4479319d35c3e9cc60c4501938c"
+  dependencies:
+    lodash._baseflatten "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2, lodash.merge@^3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.merge@^4.3.0, lodash.merge@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.0.tgz#69884ba144ac33fe699737a6086deffadd0f89c5"
+
+lodash.omit@^4.1.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.uniq@^4.2.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
+
+lodash.uniqby@^4.7.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz#d99c07a669e9e6d24e1362dfe266c67616af1302"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.10.0, lodash@^3.10.1, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.0, lodash@^4.12.0, lodash@^4.14.0, lodash@^4.2.0, lodash@^4.3.0, lodash@^4.6.1:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~4.16.4:
+  version "4.16.6"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.16.6.tgz#d22c9ac660288f3843e16ba7d2b5d06cca27d777"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loose-envify@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848"
+  dependencies:
+    js-tokens "^3.0.0"
+
+loud-rejection@^1.0.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f"
+  dependencies:
+    currently-unhandled "^0.4.1"
+    signal-exit "^3.0.0"
+
+lru-cache@2:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1, lru-cache@~4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+make-array@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/make-array/-/make-array-0.1.2.tgz#335e36ebb0c5a43154d21213a1ecaeae2a1bb3ef"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+map-obj@^1.0.0, map-obj@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
+
+markdown-it-terminal@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.3.tgz#c77a8533c2170b46d2a907a3c3452d4d7f4aa5db"
+  dependencies:
+    ansi-styles "^2.1.0"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash.merge "^3.3.2"
+    markdown-it "^4.4.0"
+
+markdown-it@6.0.4:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-6.0.4.tgz#87665d767d75d6ab25411d705ddd76c71285dab7"
+  dependencies:
+    argparse "^1.0.7"
+    entities "~1.1.1"
+    linkify-it "~1.2.2"
+    mdurl "~1.0.1"
+    uc.micro "^1.0.1"
+
+markdown-it@^4.4.0:
+  version "4.4.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.4.0.tgz#3df373dbea587a9a7fef3e56311b68908f75c414"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0, mdurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memory-streams@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/memory-streams/-/memory-streams-0.1.2.tgz#273ff777ab60fec599b116355255282cca2c50c2"
+  dependencies:
+    readable-stream "~1.0.2"
+
+meow@^3.7.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb"
+  dependencies:
+    camelcase-keys "^2.0.0"
+    decamelize "^1.1.2"
+    loud-rejection "^1.0.0"
+    map-obj "^1.0.1"
+    minimist "^1.1.3"
+    normalize-package-data "^2.3.4"
+    object-assign "^4.0.1"
+    read-pkg-up "^1.0.1"
+    redent "^1.0.0"
+    trim-newlines "^1.0.0"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3, merge@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+"mime-db@>= 1.27.0 < 2", mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.0, minimatch@~3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1, minimist@^1.1.3:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+moment-timezone@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.3.1.tgz#3ef47856b02d53b718a10a5ec2023aa299e07bf5"
+  dependencies:
+    moment ">= 2.6.0"
+
+moment-timezone@^0.5.0:
+  version "0.5.13"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.13.tgz#99ce5c7d827262eb0f1f702044177f60745d7b90"
+  dependencies:
+    moment ">= 2.9.0"
+
+"moment@>= 2.6.0", "moment@>= 2.9.0", moment@^2.13.0, moment@^2.9.0:
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^2.2.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-2.3.0.tgz#4028f7778b17708a489930a6e52ac3bca0da41d0"
+
+mute-stream@0.0.5, mute-stream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.5.tgz#8fbfabb0a98a253d3184331f9e8deb7372fac6c0"
+
+nan@^2.3.2:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+node-fetch@^1.3.3:
+  version "1.6.3"
+  resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.6.3.tgz#dc234edd6489982d58e8f0db4f695029abcd8c04"
+  dependencies:
+    encoding "^0.1.11"
+    is-stream "^1.0.1"
+
+node-gyp@^3.3.1, node-gyp@~3.3.1:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.3.1.tgz#80f7b6d7c2f9c0495ba42c518a670c99bdf6e4a0"
+  dependencies:
+    fstream "^1.0.0"
+    glob "3 || 4"
+    graceful-fs "^4.1.2"
+    minimatch "1"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2"
+    osenv "0"
+    path-array "^1.0.0"
+    request "2"
+    rimraf "2"
+    semver "2.x || 3.x || 4 || 5"
+    tar "^2.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-modules-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8"
+
+node-notifier@^5.0.1:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.1.2.tgz#2fa9e12605fa10009d44549d6fcd8a63dde0e4ff"
+  dependencies:
+    growly "^1.3.0"
+    semver "^5.3.0"
+    shellwords "^0.1.0"
+    which "^1.2.12"
+
+node-sass@^3.8.0:
+  version "3.13.1"
+  resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-3.13.1.tgz#7240fbbff2396304b4223527ed3020589c004fc2"
+  dependencies:
+    async-foreach "^0.1.3"
+    chalk "^1.1.1"
+    cross-spawn "^3.0.0"
+    gaze "^1.0.0"
+    get-stdin "^4.0.1"
+    glob "^7.0.3"
+    in-publish "^2.0.0"
+    lodash.assign "^4.2.0"
+    lodash.clonedeep "^4.3.2"
+    meow "^3.7.0"
+    mkdirp "^0.5.1"
+    nan "^2.3.2"
+    node-gyp "^3.3.1"
+    npmlog "^4.0.0"
+    request "^2.61.0"
+    sass-graph "^2.1.1"
+
+node-uuid@^1.4.3, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@^3.0.3, nopt@~3.0.6:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, normalize-package-data@^2.3.2, normalize-package-data@^2.3.4, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-git-info@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/npm-git-info/-/npm-git-info-1.0.3.tgz#a933c42ec321e80d3646e0d6e844afe94630e1d5"
+
+npm-install-checks@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@^4.1.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.2.1.tgz#593303fdea85f7c422775f17f9eb7670f680e3ec"
+  dependencies:
+    hosted-git-info "^2.1.5"
+    semver "^5.1.0"
+
+npm-package-arg@~4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.1.1.tgz#86d9dca985b4c5e5d59772dfd5de6919998a495a"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-registry-client@~7.1.0:
+  version "7.1.2"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.1.2.tgz#ddf243a2bd149d35172fe680aff40dfa20054bc3"
+  dependencies:
+    chownr "^1.0.1"
+    concat-stream "^1.4.6"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.0"
+    request "^2.47.0"
+    retry "^0.8.0"
+    rimraf "2"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0 || ~3.1.0"
+
+npm-user-validate@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@2.15.5:
+  version "2.15.5"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.15.5.tgz#5fcd71999c3d54baa0e1c27ac44f84a1b82b4559"
+  dependencies:
+    abbrev "~1.0.7"
+    ansi "~0.3.1"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.9"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.2"
+    columnify "~1.5.4"
+    config-chain "~1.1.10"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.9"
+    fs-write-stream-atomic "~1.0.8"
+    fstream "~1.0.8"
+    fstream-npm "~1.0.7"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~7.0.3"
+    graceful-fs "~4.1.4"
+    hosted-git-info "~2.1.4"
+    inflight "~1.0.4"
+    inherits "~2.0.1"
+    ini "~1.3.4"
+    init-package-json "~1.9.3"
+    lockfile "~1.0.1"
+    lru-cache "~4.0.1"
+    minimatch "~3.0.0"
+    mkdirp "~0.5.1"
+    node-gyp "~3.3.1"
+    nopt "~3.0.6"
+    normalize-git-url "~3.0.2"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.7"
+    npm-package-arg "~4.1.0"
+    npm-registry-client "~7.1.0"
+    npm-user-validate "~0.1.2"
+    npmlog "~2.0.3"
+    once "~1.3.3"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.4"
+    readable-stream "~2.1.2"
+    realize-package-specifier "~3.0.3"
+    request "~2.72.0"
+    retry "~0.9.0"
+    rimraf "~2.5.2"
+    semver "~5.1.0"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~2.0.0"
+    spdx-license-ids "~1.2.1"
+    strip-ansi "~3.0.1"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.8"
+    wrappy "~1.0.1"
+    write-file-atomic "~1.1.4"
+
+"npmlog@0 || 1 || 2", "npmlog@0.1 || 1 || 2", "npmlog@~2.0.0 || ~3.1.0", npmlog@~2.0.3:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+onetime@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/onetime/-/onetime-1.1.0.tgz#a1f7838f8314c516f05ecefcbc4ccfe04b4ed789"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+optimist@^0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+ora@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/ora/-/ora-0.2.3.tgz#37527d220adcd53c39b73571d754156d5db657a4"
+  dependencies:
+    chalk "^1.1.1"
+    cli-cursor "^1.0.2"
+    cli-spinners "^0.1.2"
+    object-assign "^4.0.1"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@^0.1.3, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-array@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-array/-/path-array-1.0.1.tgz#7e2f0f35f07a2015122b868b7eac0eb2c4fec271"
+  dependencies:
+    array-index "^1.0.0"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-exists@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b"
+  dependencies:
+    pinkie-promise "^2.0.0"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+path-type@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
+  dependencies:
+    graceful-fs "^4.1.2"
+    pify "^2.0.0"
+    pinkie-promise "^2.0.0"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+pify@^2.0.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+portfinder@^1.0.3:
+  version "1.0.13"
+  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.13.tgz#bb32ecd87c27104ae6ee44b5a3ccbf0ebb1aede9"
+  dependencies:
+    async "^1.5.2"
+    debug "^2.2.0"
+    mkdirp "0.5.x"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+printf@^0.2.3:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/printf/-/printf-0.2.5.tgz#c438ca2ca33e3927671db4ab69c0e52f936a4f0f"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+process-relative-require@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/process-relative-require/-/process-relative-require-1.0.0.tgz#1590dfcf5b8f2983ba53e398446b68240b4cc68a"
+  dependencies:
+    node-modules-path "^1.0.0"
+
+promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+qs@5.2.0:
+  version "5.2.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.0.tgz#a9f31142af468cb72b25b30136ba2456834916be"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.1.0.tgz#4d932e5c7ea411cca76a312d39a606200fd50cd9"
+
+qs@~6.1.0:
+  version "6.1.2"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.1.2.tgz#b59d8925d0c999ef6d63acf4ac5abb0adaa24b54"
+
+quick-temp@0.1.5, quick-temp@^0.1.0, quick-temp@^0.1.2, quick-temp@^0.1.3, quick-temp@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.5.tgz#0d0d67f0fb6a589a0e142f90985f76cdbaf403f7"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+qunit-notifications@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/qunit-notifications/-/qunit-notifications-0.1.1.tgz#3001afc6a6a77dfbd962ccbcddde12dec5286c09"
+
+qunitjs@^1.20.0:
+  version "1.23.1"
+  resolved "https://registry.yarnpkg.com/qunitjs/-/qunitjs-1.23.1.tgz#1971cf97ac9be01a64d2315508d2e48e6fd4e719"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.1.5:
+  version "2.1.7"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.1.7.tgz#adfeace2e4fb3098058014d08c072dcc59758774"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.13"
+    unpipe "1.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.4:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read-pkg-up@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02"
+  dependencies:
+    find-up "^1.0.0"
+    read-pkg "^1.0.0"
+
+read-pkg@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28"
+  dependencies:
+    load-json-file "^1.0.0"
+    normalize-package-data "^2.3.2"
+    path-type "^1.0.0"
+
+read@1, read@~1.0.1, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+"readable-stream@1 || 2", readable-stream@^2, readable-stream@^2.0.2, readable-stream@~2.1.2:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.1.5.tgz#66fa8b720e1438b364681f2ad1a63c618448c9d0"
+  dependencies:
+    buffer-shims "^1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@1.1:
+  version "1.1.13"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.13.tgz#f6eef764f514c89e2b9e23146a75ba106756d23e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@^2.0.6, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-1.0.1.tgz#41059608ffc154757b715d9989d199ffbf372e35"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    mute-stream "0.0.5"
+
+realize-package-specifier@~3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17, recast@^0.11.3:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redent@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde"
+  dependencies:
+    indent-string "^2.1.0"
+    strip-indent "^1.0.1"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator-runtime@^0.10.0:
+  version "0.10.5"
+  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658"
+
+regenerator-runtime@^0.9.5:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.9.6.tgz#d33eb95d0d2001a4be39659707c51b0cb71ce029"
+
+regenerator-transform@0.9.11:
+  version "0.9.11"
+  resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.11.tgz#3a7d067520cb7b7176769eb5ff868691befe1283"
+  dependencies:
+    babel-runtime "^6.18.0"
+    babel-types "^6.19.0"
+    private "^0.1.6"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu-core@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240"
+  dependencies:
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request@2, request@^2.27.0, request@^2.47.0, request@^2.61.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@~2.72.0:
+  version "2.72.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.72.0.tgz#0ce3a179512620b10441f14c82e21c12c0ddb4e1"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    bl "~1.1.2"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.6"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.1"
+    qs "~6.1.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+require-directory@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
+
+require-main-filename@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+restore-cursor@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-1.0.1.tgz#34661f46886327fed2991479152252df92daa541"
+  dependencies:
+    exit-hook "^1.0.0"
+    onetime "^1.0.0"
+
+retry@^0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f"
+
+retry@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.9.0.tgz#6f697e50a0e4ddc8c8f7fb547a9b60dead43678d"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.8, rimraf@^2.3.2, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.4.4, rimraf@^2.5.2, rimraf@^2.5.3, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rimraf@~2.5.2:
+  version "2.5.4"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04"
+  dependencies:
+    glob "^7.0.5"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0, rsvp@^3.2.1:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.0.6:
+  version "3.0.21"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.0.21.tgz#49c588fe18ef293bcd0ab9f4e6756e6ac433359f"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+run-async@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/run-async/-/run-async-0.1.0.tgz#c8ad4a5e110661e402a7d21b530e009f25f8e389"
+  dependencies:
+    once "^1.3.0"
+
+rx-lite@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-3.1.2.tgz#19ce502ca572665f3b647b10939f97fd1615f102"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+sass-graph@^2.1.1:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.2.tgz#f4d6c95b546ea2a09d14176d0fc1a07ee2b48354"
+  dependencies:
+    glob "^7.0.0"
+    lodash "^4.0.0"
+    scss-tokenizer "^0.2.1"
+    yargs "^6.6.0"
+
+scss-tokenizer@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/scss-tokenizer/-/scss-tokenizer-0.2.1.tgz#07c0cc577bb7ab4d08fd900185adbf4bc844141d"
+  dependencies:
+    js-base64 "^2.1.8"
+    source-map "^0.4.2"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.1.0, semver@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^4.1.0, semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.1.0:
+  version "5.1.1"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.1.1.tgz#a3292a373e6f3e0798da0b20641b9a9c5bc47e19"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@^2.0.0, set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-command@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
+  dependencies:
+    shebang-regex "^1.0.0"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+shellwords@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.0.tgz#66afd47b6a12932d9071cbfd98a52e785cd0ba14"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+silent-error@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.0.1.tgz#71b7d503d1c6f94882b51b56be879b113cb4822c"
+  dependencies:
+    debug "^2.2.0"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.6.0.tgz#5b668f4f771304dfeed179064708386fa6717853"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.0"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.6.0.tgz#3e40d932637e6bd923981b25caf7c53e83b6e2e1"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.0"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.6.0"
+    socket.io-parser "2.3.1"
+
+sorted-object@~2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-2.0.1.tgz#7d631f4bd3a798a24af1dffcfbfe83337a5df5fc"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-support@^0.4.2:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.15.tgz#03202df65c06d2bd8c7ec2362a193056fef8d3b1"
+  dependencies:
+    source-map "^0.5.6"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.4.x, source-map@^0.4.2, source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+spawn-args@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/spawn-args/-/spawn-args-0.2.0.tgz#fb7d0bd1d70fd4316bd9e3dec389e65f9d6361bb"
+
+spawnback@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/spawnback/-/spawnback-1.0.0.tgz#f73662f7e54d95367eca74d6426c677dd7ea686f"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.2, spdx-license-ids@~1.2.1:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sri-toolbox@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+statuses@1, "statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1, string-width@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1, strip-ansi@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-indent@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2"
+  dependencies:
+    get-stdin "^4.0.1"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+sum-up@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sum-up/-/sum-up-1.0.3.tgz#1c661f667057f63bcb7875aa1438bc162525156e"
+  dependencies:
+    chalk "^1.0.0"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+sync-exec@^0.6.2:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/sync-exec/-/sync-exec-0.6.2.tgz#717d22cc53f0ce1def5594362f3a89a2ebb91105"
+
+tap-parser@^5.1.0:
+  version "5.3.3"
+  resolved "https://registry.yarnpkg.com/tap-parser/-/tap-parser-5.3.3.tgz#53ec8a90f275d6fff43f169e56a679502a741185"
+  dependencies:
+    events-to-array "^1.0.1"
+    js-yaml "^3.2.7"
+  optionalDependencies:
+    readable-stream "^2"
+
+tar@^2.0.0, tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.3:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.3.tgz#e0c6bc4d26b903124410e4fed81103014dfc1f59"
+  dependencies:
+    os-tmpdir "^1.0.0"
+    rimraf "~2.2.6"
+
+testem@^1.8.1:
+  version "1.16.0"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-1.16.0.tgz#3933040b5d5b5fbdb6a2b1e7032e511b54a05867"
+  dependencies:
+    backbone "^1.1.2"
+    bluebird "^3.4.6"
+    charm "^1.0.0"
+    commander "^2.6.0"
+    consolidate "^0.14.0"
+    cross-spawn "^5.1.0"
+    express "^4.10.7"
+    fireworm "^0.7.0"
+    glob "^7.0.4"
+    http-proxy "^1.13.1"
+    js-yaml "^3.2.5"
+    lodash.assignin "^4.1.0"
+    lodash.clonedeep "^4.4.1"
+    lodash.find "^4.5.1"
+    lodash.uniqby "^4.7.0"
+    mkdirp "^0.5.1"
+    mustache "^2.2.1"
+    node-notifier "^5.0.1"
+    npmlog "^4.0.0"
+    printf "^0.2.3"
+    rimraf "^2.4.4"
+    socket.io "1.6.0"
+    spawn-args "^0.2.0"
+    styled_string "0.0.1"
+    tap-parser "^5.1.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+through@^2.3.6, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+tiny-lr@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.2.1.tgz#b3fdba802e5d56a33c2f6f10794b32e477ac729d"
+  dependencies:
+    body-parser "~1.14.0"
+    debug "~2.2.0"
+    faye-websocket "~0.10.0"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~5.1.0"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0, to-fast-properties@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tree-sync@^1.1.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/tree-sync/-/tree-sync-1.2.2.tgz#2cf76b8589f59ffedb58db5a3ac7cb013d0158b7"
+  dependencies:
+    debug "^2.2.0"
+    fs-tree-diff "^0.5.6"
+    mkdirp "^0.5.1"
+    quick-temp "^0.1.5"
+    walk-sync "^0.2.7"
+
+trim-newlines@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613"
+
+trim-right@^1.0.0, trim-right@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.6.10, type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0, uc.micro@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.6, uglify-js@^2.7.0:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+untildify@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0"
+  dependencies:
+    os-homedir "^1.0.0"
+
+user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.5, walk-sync@^0.2.6, walk-sync@^0.2.7:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.5.1:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f"
+
+which@1, which@^1.2.12, which@^1.2.9, which@~1.2.8:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrap-ansi@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
+  dependencies:
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+
+wrappy@1, wrappy@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@^1.1.2:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.3.4.tgz#f807a4f0b1d9e913ae7a48112e6cc3af1991b45f"
+  dependencies:
+    graceful-fs "^4.1.11"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+write-file-atomic@~1.1.4:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-2.0.0.tgz#edbc903cc385fc04523d966a335504b5504d1bd2"
+  dependencies:
+    os-homedir "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0, y18n@^3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.19:
+  version "0.0.19"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.19.tgz#261760b01754a5c3731a22a8df9a8cd7c9cceda6"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.26.6"
+    lodash.merge "^3.0.2"
+
+yargs-parser@^4.2.0:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-4.2.1.tgz#29cceac0dc4f03c6c87b4a9f217dd18c9f74871c"
+  dependencies:
+    camelcase "^3.0.0"
+
+yargs@^6.6.0:
+  version "6.6.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-6.6.0.tgz#782ec21ef403345f830a808ca3d513af56065208"
+  dependencies:
+    camelcase "^3.0.0"
+    cliui "^3.2.0"
+    decamelize "^1.1.1"
+    get-caller-file "^1.0.1"
+    os-locale "^1.4.0"
+    read-pkg-up "^1.0.1"
+    require-directory "^2.1.1"
+    require-main-filename "^1.0.1"
+    set-blocking "^2.0.0"
+    string-width "^1.0.2"
+    which-module "^1.0.0"
+    y18n "^3.2.1"
+    yargs-parser "^4.2.0"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/hueambarimigration/pom.xml b/contrib/views/hueambarimigration/pom.xml
index 30e7694..ec24679 100644
--- a/contrib/views/hueambarimigration/pom.xml
+++ b/contrib/views/hueambarimigration/pom.xml
@@ -163,30 +163,34 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>${project.basedir}/src/main/resources/ui/hueambarimigration-view/</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- setting npm_config_tmp environment variable is a workaround for 
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install --pure-lockfile</id>
             <phase>generate-sources</phase>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <configuration>
-              <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm
-              </arguments>
+              <arguments>install --python="${project.basedir}/../src/main/unix/ambari-python-wrap" --unsafe-perm --ignore-engines</arguments>
             </configuration>
           </execution>
         </executions>
diff --git a/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/package.json b/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/package.json
index 09c3a27..83d2342 100644
--- a/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/package.json
+++ b/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/package.json
@@ -11,7 +11,7 @@
     "start": "ember server",
     "build": "ember build",
     "test": "ember test",
-    "preinstall": "chmod +x node/node_modules/npm/bin/node-gyp-bin/node-gyp",
+    "preinstall": "",
     "postinstall": "node node_modules/.bin/bower --allow-root install"
 
   },
diff --git a/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/yarn.lock b/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/yarn.lock
new file mode 100644
index 0000000..6115cb9
--- /dev/null
+++ b/contrib/views/hueambarimigration/src/main/resources/ui/hueambarimigration-view/yarn.lock
@@ -0,0 +1,5553 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1, abbrev@^1.0.5:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.5.tgz#76962dac876ed3311b05d29c6a58c14e1ef3304b"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-escapes@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-1.4.0.tgz#d3a8a83b319aa67793662b13e761c7911422306e"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@1.0.0, archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-filter@~0.0.0:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-map@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662"
+
+array-reduce@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b"
+
+array-to-error@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-to-error/-/array-to-error-1.1.1.tgz#d68812926d14097a205579a667eeaf1856a44c07"
+  dependencies:
+    array-to-sentence "^1.1.0"
+
+array-to-sentence@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/array-to-sentence/-/array-to-sentence-1.1.0.tgz#c804956dafa53232495b205a9452753a258d39fc"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@^0.2.8, async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@^1.4.0, async@^1.4.2, async@^1.5.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-feature-flags@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-feature-flags/-/babel-plugin-feature-flags-0.2.3.tgz#81d81ed77bda2014098fa8243abcf03a551cbd4d"
+  dependencies:
+    json-stable-stringify "^1.0.1"
+
+babel-plugin-filter-imports@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-0.2.1.tgz#784f96a892f2f7ed2ccf0955688bd8916cd2e212"
+  dependencies:
+    json-stable-stringify "^1.0.1"
+
+babel-plugin-htmlbars-inline-precompile@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.1.0.tgz#b784723bd1f108796b56faf9f1c05eb5ca442983"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/binary/-/binary-0.3.0.tgz#9f60553bc5ce8c3386f3b553cff47462adecaa79"
+  dependencies:
+    buffers "~0.1.1"
+    chainsaw "~0.1.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@^1.0.0, bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+bl@~0.9.0:
+  version "0.9.5"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-0.9.5.tgz#c06b797af085ea00bc527afc8efcf11de2232054"
+  dependencies:
+    readable-stream "~1.0.26"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.1.1, bluebird@^3.4.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-1.4.0.tgz#16c38c1135f8071c19f25938d61b0d8cbf18d3f1"
+  dependencies:
+    graceful-fs "^4.1.3"
+    mout "^1.0.0"
+    optimist "^0.6.1"
+    osenv "^0.1.3"
+    untildify "^2.1.0"
+
+bower-endpoint-parser@0.2.2, bower-endpoint-parser@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-json@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/bower-json/-/bower-json-0.4.0.tgz#a99c3ccf416ef0590ed0ded252c760f1c6d93766"
+  dependencies:
+    deep-extend "~0.2.5"
+    graceful-fs "~2.0.0"
+    intersect "~0.0.3"
+
+bower-logger@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-logger/-/bower-logger-0.2.2.tgz#39be07e979b2fc8e03a94634205ed9422373d381"
+
+bower-registry-client@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/bower-registry-client/-/bower-registry-client-1.0.0.tgz#697c3499067549a106b49f26d03e6dd1017a9241"
+  dependencies:
+    async "^0.2.8"
+    graceful-fs "^4.0.0"
+    lru-cache "^2.3.0"
+    mkdirp "^0.3.5"
+    request "^2.51.0"
+    request-replay "^0.2.0"
+    rimraf "^2.2.0"
+
+bower@1.7.2, bower@^1.3.12:
+  version "1.7.2"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.7.2.tgz#b04228f9970f11777017e64ae39d71f9346c9575"
+  dependencies:
+    abbrev "^1.0.5"
+    archy "1.0.0"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "^0.2.2"
+    bower-json "^0.4.0"
+    bower-logger "^0.2.2"
+    bower-registry-client "^1.0.0"
+    cardinal "0.4.4"
+    chalk "^1.0.0"
+    chmodr "^1.0.2"
+    configstore "^0.3.2"
+    decompress-zip "^0.1.0"
+    destroy "^1.0.3"
+    fs-write-stream-atomic "1.0.5"
+    fstream "^1.0.3"
+    fstream-ignore "^1.0.2"
+    github "^0.2.3"
+    glob "^4.3.2"
+    graceful-fs "^3.0.5"
+    handlebars "^2.0.0"
+    inquirer "0.10.0"
+    insight "^0.7.0"
+    is-root "^1.0.0"
+    junk "^1.0.0"
+    lockfile "^1.0.0"
+    lru-cache "^2.5.0"
+    md5-hex "^1.0.2"
+    mkdirp "0.5.0"
+    mout "^0.11.0"
+    nopt "^3.0.1"
+    opn "^1.0.1"
+    p-throttler "0.1.1"
+    promptly "0.2.0"
+    q "^1.1.2"
+    request "2.53.0"
+    request-progress "0.3.1"
+    retry "0.6.1"
+    rimraf "^2.2.8"
+    semver "^2.3.0"
+    semver-utils "^1.1.1"
+    shell-quote "^1.4.2"
+    stringify-object "^1.0.0"
+    tar-fs "^1.4.1"
+    tmp "0.0.24"
+    update-notifier "^0.6.0"
+    user-home "^1.1.0"
+    which "^1.0.8"
+
+boxen@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/boxen/-/boxen-0.3.1.tgz#a7d898243ae622f7abb6bb604d740a76c6a5461b"
+  dependencies:
+    chalk "^1.1.1"
+    filled-array "^1.0.0"
+    object-assign "^4.0.1"
+    repeating "^2.0.0"
+    string-width "^1.0.1"
+    widest-line "^1.0.0"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.4.2:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0, broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-clean-css@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-1.1.0.tgz#9db143d9af7e0ae79c26e3ac5a9bb2d720ea19fa"
+  dependencies:
+    broccoli-persistent-filter "^1.1.6"
+    clean-css-promise "^0.1.0"
+    inline-source-map-comment "^1.0.5"
+    json-stable-stringify "^1.0.0"
+
+broccoli-concat@^2.0.4, broccoli-concat@^2.2.0:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-2.3.8.tgz#590cdcc021bb905b6c121d87c2d1d57df44a2a48"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-stew "^1.3.3"
+    fast-sourcemap-concat "^1.0.1"
+    fs-extra "^0.30.0"
+    lodash.merge "^4.3.0"
+    lodash.omit "^4.1.0"
+    lodash.uniq "^4.2.0"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-file-creator@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-file-creator/-/broccoli-file-creator-1.1.1.tgz#1b35b67d215abdfadd8d49eeb69493c39e6c3450"
+  dependencies:
+    broccoli-kitchen-sink-helpers "~0.2.0"
+    broccoli-plugin "^1.1.0"
+    broccoli-writer "~0.1.1"
+    mkdirp "^0.5.1"
+    rsvp "~3.0.6"
+    symlink-or-copy "^1.0.1"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel-reducer@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel-reducer/-/broccoli-funnel-reducer-1.0.0.tgz#11365b2a785aec9b17972a36df87eef24c5cc0ea"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sri-hash@^2.1.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-2.1.2.tgz#bc69905ed7a381ad325cc0d02ded071328ebf3f3"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.3.3:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+buffers@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/buffers/-/buffers-0.1.1.tgz#b24579c3bed4d6d396aeee6d9a8ae7f5482ab7bb"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+capture-stack-trace@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d"
+
+cardinal@0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.4.4.tgz#ca5bb68a5b511b90fe93b9acea49bdee5c32bfe2"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.4.0"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.9.0.tgz#b7b65ce6bf1413886539cfd533f0b30effa9cf88"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chainsaw@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/chainsaw/-/chainsaw-0.1.0.tgz#5eab50b28afe58074d0d58291388828b5e5fbc98"
+  dependencies:
+    traverse ">=0.3.0 <0.4"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@^1.0.2, chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css-promise@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/clean-css-promise/-/clean-css-promise-0.1.1.tgz#43f3d2c8dfcb2bf071481252cd9b76433c08eecb"
+  dependencies:
+    array-to-error "^1.0.0"
+    clean-css "^3.4.5"
+    pinkie-promise "^2.0.0"
+
+clean-css@^3.4.5:
+  version "3.4.25"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.25.tgz#9e9a52d5c1e6bc5123e1b2783fa65fe958946ede"
+  dependencies:
+    commander "2.8.x"
+    source-map "0.4.x"
+
+cli-cursor@^1.0.1, cli-cursor@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-1.0.2.tgz#64da3f7d56a54412e59794bd62dc35295e8f2987"
+  dependencies:
+    restore-cursor "^1.0.1"
+
+cli-spinners@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-0.1.2.tgz#bb764d88e185fb9e1e6a2a1f19772318f605e31c"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli-width@^1.0.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-1.1.1.tgz#a4d293ef67ebb7b88d4a4d42c0ccf00c4d1e366d"
+
+cli-width@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.1.0.tgz#b234ca209b29ef66fc518d9b98d5847b00edf00a"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.4:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4, combined-stream@~0.0.5:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@2.8.x:
+  version "2.8.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@^1.4.6:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+config-chain@~1.1.10:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@^0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.3.2.tgz#25e4c16c3768abf75c5a65bc61761f495055b459"
+  dependencies:
+    graceful-fs "^3.0.1"
+    js-yaml "^3.1.0"
+    mkdirp "^0.5.0"
+    object-assign "^2.0.0"
+    osenv "^0.1.0"
+    user-home "^1.0.0"
+    uuid "^2.0.1"
+    xdg-basedir "^1.0.0"
+
+configstore@^1.0.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.4.0.tgz#c35781d0501d268c25c54b8b17f6240e8a4fb021"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+configstore@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-2.1.0.tgz#737a3a7036e9886102aa6099e47bb33ab1aba1a1"
+  dependencies:
+    dot-prop "^3.0.0"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.14.5.tgz#5a25047bc76f73072667c8cb52c989888f494c63"
+  dependencies:
+    bluebird "^3.1.1"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+create-error-class@^3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6"
+  dependencies:
+    capture-stack-trace "^1.0.0"
+
+cross-spawn@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449"
+  dependencies:
+    lru-cache "^4.0.1"
+    shebang-command "^1.2.0"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@2.2.0, debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+decompress-zip@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/decompress-zip/-/decompress-zip-0.1.0.tgz#bce60c11664f2d660fca4bcf634af6de5d6c14c7"
+  dependencies:
+    binary "^0.3.0"
+    graceful-fs "^3.0.0"
+    mkpath "^0.1.0"
+    nopt "^3.0.1"
+    q "^1.1.2"
+    readable-stream "^1.1.8"
+    touch "0.0.3"
+
+deep-extend@~0.2.5:
+  version "0.2.11"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.2.11.tgz#7a16ba69729132340506170494bc83f7076fe08f"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@^1.0.3, destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+diff@^2.2.2:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-2.2.3.tgz#60eafd0d28ee906e4e8ff0a52c1229521033bf99"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+dot-prop@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-3.0.0.tgz#1b708af094a49c9a0e7dbcad790aba539dac1177"
+  dependencies:
+    is-obj "^1.0.0"
+
+duplexer2@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/duplexer2/-/duplexer2-0.1.4.tgz#8b12dab878c0d69e3e7891051662a32fc6bddcc1"
+  dependencies:
+    readable-stream "^2.0.2"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-ajax@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ember-ajax/-/ember-ajax-0.7.1.tgz#0b3d1eeb99ed9d9251c013cc6ab6a1e7d4d14507"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-basic-dropdown@^0.11.3:
+  version "0.11.7"
+  resolved "https://registry.yarnpkg.com/ember-basic-dropdown/-/ember-basic-dropdown-0.11.7.tgz#af9e7c2be222860f279a13a125d532159dae8c71"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+    ember-get-config "0.0.2"
+    ember-getowner-polyfill "^1.0.0"
+    ember-wormhole "0.3.4"
+
+ember-cli-app-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.1.tgz#d135eba75f30e791d8a5e5844f1251dcbcc40438"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-auto-complete@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-auto-complete/-/ember-cli-auto-complete-0.2.1.tgz#2a159a2fd922eda9a9ad065a3d3d98c7a0455b37"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-broccoli@0.16.9:
+  version "0.16.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-broccoli/-/ember-cli-broccoli-0.16.9.tgz#4e9128f59ffaee99705c01e9a44a691a0ae199db"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^4.0.4"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+ember-cli-datepicker@2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-datepicker/-/ember-cli-datepicker-2.0.1.tgz#a89c8852d472e778e457e0e67ec40a5fa56d11c5"
+
+ember-cli-dependency-checker@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.4.0.tgz#2b13f977e1eea843fc1a21a001be6ca5d4ef1942"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "^0.2.0"
+    semver "^4.1.0"
+
+ember-cli-get-component-path-option@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-component-path-option/-/ember-cli-get-component-path-option-1.0.0.tgz#0d7b595559e2f9050abed804f1d8eff1b08bc771"
+
+ember-cli-htmlbars-inline-precompile@^0.3.1:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.6.tgz#4095fe423f93102724c0725e4dd1a31f25e24de5"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "^0.1.0"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    hash-for-dep "^1.0.2"
+
+ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.3:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.4.0:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-pace@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-pace/-/ember-cli-pace-0.1.0.tgz#c70a16a513415f1b026a7de919353bea14cda7e4"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    uglify-js "^2.6.1"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-2.0.0.tgz#45c8b985eba06bb443b3abce1c3c6220fdcb8094"
+  dependencies:
+    broccoli-clean-css "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@^1.4.0:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.4.2.tgz#7ca25495c70ca347106d44fc00f0d7aeca027475"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-concat "^2.2.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    ember-cli-babel "^5.1.5"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-selectize@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ember-cli-selectize/-/ember-cli-selectize-0.5.3.tgz#dc011da70ba6a7d4532c3c4b1ce88d2a5a5a2e37"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+    ember-new-computed "^1.0.0"
+
+ember-cli-sri@^2.1.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-2.1.1.tgz#971620934a4b9183cf7923cc03e178b83aa907fd"
+  dependencies:
+    broccoli-sri-hash "^2.1.0"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-valid-component-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-valid-component-name/-/ember-cli-valid-component-name-1.0.0.tgz#71550ce387e0233065f30b30b1510aa2dfbe87ef"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4, ember-cli-version-checker@^1.1.6, ember-cli-version-checker@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@2.4.3:
+  version "2.4.3"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-2.4.3.tgz#20a1704c20b48ead444a1cb335f9d3ef1d18e7ab"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    bower "^1.3.12"
+    bower-config "^1.3.0"
+    bower-endpoint-parser "0.2.2"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-concat "^2.0.4"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.2"
+    broccoli-funnel "^1.0.0"
+    broccoli-funnel-reducer "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-viz "^2.0.1"
+    chalk "^1.1.1"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "^2.0.0"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^2.2.2"
+    ember-cli-broccoli "0.16.9"
+    ember-cli-get-component-path-option "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^2.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-valid-component-name "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    filesize "^3.1.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.26.7"
+    fs-monitor-stack "^1.0.2"
+    fs-tree-diff "^0.4.4"
+    get-caller-file "^1.0.0"
+    git-repo-info "^1.0.4"
+    glob "7.0.3"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "^0.12.0"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.21"
+    lodash "^4.6.1"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.3"
+    merge-defaults "^0.2.1"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.1"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.21"
+    ora "^0.2.0"
+    portfinder "^1.0.3"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.5"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rimraf "^2.4.4"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.3"
+    testem "^1.6.0"
+    through "^2.3.6"
+    tiny-lr "0.2.1"
+    tree-sync "^1.1.0"
+    walk-sync "^0.2.6"
+    yam "0.0.18"
+
+ember-data@2.7.0:
+  version "2.7.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.7.0.tgz#09341c9825657907a1846d9f3dac2dfaafcdaa10"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    babel-plugin-feature-flags "^0.2.1"
+    babel-plugin-filter-imports "^0.2.0"
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-file-creator "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.1.3"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    ember-inflector "^1.9.4"
+    exists-sync "0.0.3"
+    git-repo-info "^1.1.2"
+    inflection "^1.8.0"
+    npm-git-info "^1.0.0"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+
+ember-export-application-global@^1.0.5:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-factory-for-polyfill@^1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-factory-for-polyfill/-/ember-factory-for-polyfill-1.1.1.tgz#c1124d541a058baaa6681d9611340c16f0baf660"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+    ember-cli-version-checker "^1.2.0"
+
+ember-get-config@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/ember-get-config/-/ember-get-config-0.0.2.tgz#eff6a314c5592a398000a331ddf36dfd1f5aa08d"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+
+ember-getowner-polyfill@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-getowner-polyfill/-/ember-getowner-polyfill-1.2.3.tgz#ea70f4a48b1c05b91056371d1878bbafe018222e"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.2.0"
+    ember-factory-for-polyfill "^1.1.0"
+
+ember-hash-helper-polyfill@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-hash-helper-polyfill/-/ember-hash-helper-polyfill-0.1.2.tgz#bc8ee6fa59e9541fce07d2cf4f263cf785e9e1db"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+    ember-cli-version-checker "^1.2.0"
+
+ember-inflector@^1.9.4:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-1.12.1.tgz#d8bd2ca2f327b439720f89923fe614d46b5da1ca"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+
+ember-load-initializers@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-load-initializers/-/ember-load-initializers-0.5.1.tgz#76e3db23c111dbdcd3ae6f687036bf0b56be0cbe"
+
+ember-new-computed@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/ember-new-computed/-/ember-new-computed-1.0.3.tgz#592af8a778e0260ce7e812687c3aedface1622bf"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-power-select@0.10.4:
+  version "0.10.4"
+  resolved "https://registry.yarnpkg.com/ember-power-select/-/ember-power-select-0.10.4.tgz#c136852a24228bbf2e515c7f08673438c9c887e0"
+  dependencies:
+    ember-basic-dropdown "^0.11.3"
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.3"
+    ember-hash-helper-polyfill "^0.1.0"
+    ember-truth-helpers "^1.2.0"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@^2.0.3:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.1.1.tgz#5e4c1fffe9f5f48fc2194ad7592274ed0cd74f72"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+ember-truth-helpers@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-1.3.0.tgz#6ed9f83ce9a49f52bb416d55e227426339a64c60"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-wormhole@0.3.4:
+  version "0.3.4"
+  resolved "https://registry.yarnpkg.com/ember-wormhole/-/ember-wormhole-0.3.4.tgz#b672e405ace19354b756746d4a4452be78d84a6b"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+emberx-select@2.1.2:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/emberx-select/-/emberx-select-2.1.2.tgz#30f4eddd4e2c02a981d90be3c108265fdad7df32"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    ember-cli-version-checker "^1.1.6"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+end-of-stream@^1.0.0, end-of-stream@^1.1.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.0.tgz#7a90d833efda6cfa6eac0f4949dbb0fad3a63206"
+  dependencies:
+    once "^1.4.0"
+
+engine.io-client@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.0.tgz#7b730e4127414087596d9be3c88d2bc5fdb6cf5c"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.1"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.1.tgz#9554f1ae33107d6fbd170ca5466d2f833f6a07cf"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.0.tgz#3eeb5f264cb75dbbec1baaea26d61f5a4eace2aa"
+  dependencies:
+    accepts "1.3.3"
+    base64id "0.1.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    ws "1.1.1"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+error-ex@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc"
+  dependencies:
+    is-arrayish "^0.2.1"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.14:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3, escape-string-regexp@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esprima@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.0.4.tgz#9f557e08fc3b4d26ece9dd34f8fbf476b62585ad"
+
+esutils@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit-hook@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/exit-hook/-/exit-hook-1.1.1.tgz#f05ca233b48c05d54fff07765df8507e95c02ff8"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+fast-sourcemap-concat@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-1.1.0.tgz#a800767abed5eda02e67238ec063a709be61f9d4"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    fs-extra "^0.30.0"
+    memory-streams "^0.1.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+figures@^1.3.5:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e"
+  dependencies:
+    escape-string-regexp "^1.0.5"
+    object-assign "^4.1.0"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+filesize@^3.1.3:
+  version "3.5.6"
+  resolved "https://registry.yarnpkg.com/filesize/-/filesize-3.5.6.tgz#5fd98f3eac94ec9516ef8ed5782fad84a01a0a1a"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+filled-array@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/filled-array/-/filled-array-1.1.0.tgz#c3c4f6c663b923459a9aa29912d2d031f1507f84"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.7.0:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.7.1.tgz#ccf20f7941f108883fcddb99383dbe6e1861c758"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash.debounce "^3.1.1"
+    lodash.flatten "^3.0.2"
+    minimatch "^3.0.2"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.2.0.tgz#26f8bc26da6440e299cbdcfb69035c4f77a6e466"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime-types "~2.0.3"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.26.7:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4, fs-tree-diff@^0.5.6:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.5.tgz#862a4dabdffcafabfc16499458e37310c39925f6"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+
+fs-write-stream-atomic@~1.0.8:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0, fstream-ignore@^1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@^1.0.3, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4, git-repo-info@^1.1.2:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+github@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/github/-/github-0.2.4.tgz#24fa7f0e13fa11b946af91134c51982a91ce538b"
+  dependencies:
+    mime "^1.2.11"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@^4.3.2:
+  version "4.5.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@7.0.3:
+  version "7.0.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.3.tgz#0aa235931a4a96ac13d60ffac2fb877bd6ed4f58"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^5.0.10, glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.4, glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+got@^5.0.0:
+  version "5.7.1"
+  resolved "https://registry.yarnpkg.com/got/-/got-5.7.1.tgz#5f81635a61e4a6589f180569ea4e381680a51f35"
+  dependencies:
+    create-error-class "^3.0.1"
+    duplexer2 "^0.1.4"
+    is-redirect "^1.0.0"
+    is-retry-allowed "^1.0.0"
+    is-stream "^1.0.0"
+    lowercase-keys "^1.0.0"
+    node-status-codes "^1.0.0"
+    object-assign "^4.0.1"
+    parse-json "^2.1.0"
+    pinkie-promise "^2.0.0"
+    read-all-stream "^3.0.0"
+    readable-stream "^2.0.5"
+    timed-out "^3.0.0"
+    unzip-response "^1.0.2"
+    url-parse-lax "^1.0.0"
+
+graceful-fs@^3.0.0, graceful-fs@^3.0.1, graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.0.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2, graceful-fs@~4.1.3:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growly@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
+
+handlebars@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-2.0.0.tgz#6e9d7f8514a3467fa5e9f82cc158ecfc1d5ac76f"
+  dependencies:
+    optimist "~0.3"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+handlebars@^4.0.4:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hawk@~2.3.0:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-2.3.1.tgz#1e731ce39447fa1d0f6d707f7bceebec0fd1ec1f"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hawk@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@~2.1.4:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
+  dependencies:
+    inherits "~2.0.1"
+    statuses "1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13, iconv-lite@^0.4.5:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iferr@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0, inflection@^1.8.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@^1.3.4, ini@~1.3.0, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.3:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inline-source-map-comment@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/inline-source-map-comment/-/inline-source-map-comment-1.0.5.tgz#50a8a44c2a790dfac441b5c94eccd5462635faf6"
+  dependencies:
+    chalk "^1.0.0"
+    get-stdin "^4.0.1"
+    minimist "^1.1.1"
+    sum-up "^1.0.1"
+    xtend "^4.0.0"
+
+inquirer@0.10.0, inquirer@^0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.10.0.tgz#48cd3e23f8d989a52d47dc5e10ec75324387e908"
+  dependencies:
+    ansi-escapes "^1.1.0"
+    ansi-regex "^2.0.0"
+    chalk "^1.0.0"
+    cli-cursor "^1.0.1"
+    cli-width "^1.0.1"
+    figures "^1.3.5"
+    lodash "^3.3.1"
+    readline2 "^1.0.1"
+    run-async "^0.1.0"
+    rx-lite "^3.1.2"
+    strip-ansi "^3.0.0"
+    through "^2.3.6"
+
+inquirer@^0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.12.0.tgz#1ef2bfd63504df0bc75785fff8c2c41df12f077e"
+  dependencies:
+    ansi-escapes "^1.1.0"
+    ansi-regex "^2.0.0"
+    chalk "^1.0.0"
+    cli-cursor "^1.0.1"
+    cli-width "^2.0.0"
+    figures "^1.3.5"
+    lodash "^4.3.0"
+    readline2 "^1.0.1"
+    run-async "^0.1.0"
+    rx-lite "^3.1.2"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.0"
+    through "^2.3.6"
+
+insight@^0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/insight/-/insight-0.7.0.tgz#061f9189835bd38a97a60c2b76ea0c6b30099ff6"
+  dependencies:
+    async "^1.4.2"
+    chalk "^1.0.0"
+    configstore "^1.0.0"
+    inquirer "^0.10.0"
+    lodash.debounce "^3.0.1"
+    object-assign "^4.0.1"
+    os-name "^1.0.0"
+    request "^2.40.0"
+    tough-cookie "^2.0.0"
+
+intersect@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/intersect/-/intersect-0.0.3.tgz#c1a4a5e5eac6ede4af7504cc07e0ada7bc9f4920"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-arrayish@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-git-url@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/is-git-url/-/is-git-url-0.2.3.tgz#445200d6fbd6da028fb5e01440d9afc93f3ccb64"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-npm@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-obj@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-redirect@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24"
+
+is-retry-allowed@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34"
+
+is-root@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-root/-/is-root-1.0.0.tgz#07b6c233bc394cd9d02ba15c966bd6660d6342d5"
+
+is-stream@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^2.0.3:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-2.0.4.tgz#d23592e6a6f093efb84c2e6152056be294e414a1"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.1, isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-yaml@^3.1.0, js-yaml@^3.2.5, js-yaml@^3.2.7:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+jsonfile@^2.0.0, jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+junk@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/junk/-/junk-1.0.3.tgz#87be63488649cbdca6f53ab39bec9ccd2347f592"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klassy@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/klassy/-/klassy-0.1.3.tgz#c31d5756d583197d75f582b6e692872be497067f"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+latest-version@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-2.0.0.tgz#56f8d6139620847b8017f8f1f4d78e211324168b"
+  dependencies:
+    package-json "^2.0.0"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.21:
+  version "0.0.21"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.21.tgz#09804bf70f8aefbba745f5d56d2a4debf22711ff"
+  dependencies:
+    debug "^2.1.0"
+    lodash.assign "^3.2.0"
+    request "^2.27.0"
+    rsvp "^3.0.21"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.2.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.2.4.tgz#0773526c317c8fd13bd534ee1d180ff88abf881a"
+  dependencies:
+    uc.micro "^1.0.1"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+loader.js@^4.0.1:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/loader.js/-/loader.js-4.3.0.tgz#736c13eb8afdf75abd6c2d7b4f7fd40e1105a71f"
+
+lockfile@^1.0.0, lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash-node@^2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-2.4.1.tgz#ea82f7b100c733d1a42af76801e506105e2a80ec"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._baseassign@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz#8c38a099500f215ad09e59f1722fd0c52bfe0a4e"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._baseflatten@^3.0.0:
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/lodash._baseflatten/-/lodash._baseflatten-3.1.4.tgz#0770ff80131af6e34f3b511796a7ba5214e65ff7"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-3.2.0.tgz#3ce9f0234b4b2223e296b8fa0ac1fee8ebca64fa"
+  dependencies:
+    lodash._baseassign "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash.assignin@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2"
+
+lodash.clonedeep@^4.4.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.debounce@^3.0.1, lodash.debounce@^3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-3.1.1.tgz#812211c378a94cc29d5aa4e3346cf0bfce3a7df5"
+  dependencies:
+    lodash._getnative "^3.0.0"
+
+lodash.find@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.find/-/lodash.find-4.6.0.tgz#cb0704d47ab71789ffa0de8b97dd926fb88b13b1"
+
+lodash.flatten@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-3.0.2.tgz#de1cf57758f8f4479319d35c3e9cc60c4501938c"
+  dependencies:
+    lodash._baseflatten "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2, lodash.merge@^3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.merge@^4.3.0, lodash.merge@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.0.tgz#69884ba144ac33fe699737a6086deffadd0f89c5"
+
+lodash.omit@^4.1.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.uniq@^4.2.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
+
+lodash.uniqby@^4.7.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz#d99c07a669e9e6d24e1362dfe266c67616af1302"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.10.0, lodash@^3.3.1, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.14.0, lodash@^4.3.0, lodash@^4.6.1:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+lowercase-keys@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306"
+
+lru-cache@2, lru-cache@^2.3.0, lru-cache@^2.5.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+lru-cache@~3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-3.2.0.tgz#71789b3b7f5399bec8565dda38aa30d2a097efee"
+  dependencies:
+    pseudomap "^1.0.1"
+
+make-array@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/make-array/-/make-array-0.1.2.tgz#335e36ebb0c5a43154d21213a1ecaeae2a1bb3ef"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+markdown-it-terminal@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.3.tgz#c77a8533c2170b46d2a907a3c3452d4d7f4aa5db"
+  dependencies:
+    ansi-styles "^2.1.0"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash.merge "^3.3.2"
+    markdown-it "^4.4.0"
+
+markdown-it@4.3.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.3.0.tgz#0ee2b0724079d186b3f04b7345ce395ae47cc474"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+markdown-it@^4.4.0:
+  version "4.4.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.4.0.tgz#3df373dbea587a9a7fef3e56311b68908f75c414"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memory-streams@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/memory-streams/-/memory-streams-0.1.2.tgz#273ff777ab60fec599b116355255282cca2c50c2"
+  dependencies:
+    readable-stream "~1.0.2"
+
+merge-defaults@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/merge-defaults/-/merge-defaults-0.2.1.tgz#dd42248eb96bb6a51521724321c72ff9583dde80"
+  dependencies:
+    lodash "~2.4.1"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3, merge@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+"mime-db@>= 1.27.0 < 2", mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-db@~1.12.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.12.0.tgz#3d0c63180f458eb10d325aaa37d7c58ae312e9d7"
+
+mime-types@^2.1.11, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime-types@~2.0.1, mime-types@~2.0.3:
+  version "2.0.14"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.0.14.tgz#310e159db23e077f8bb22b748dabfa4957140aa6"
+  dependencies:
+    mime-db "~1.12.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.0, minimatch@~3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1, minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mkpath@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/mkpath/-/mkpath-0.1.0.tgz#7554a6f8d871834cc97b5462b122c4c124d6de91"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+moment-timezone@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.3.1.tgz#3ef47856b02d53b718a10a5ec2023aa299e07bf5"
+  dependencies:
+    moment ">= 2.6.0"
+
+"moment@>= 2.6.0":
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@^0.11.0:
+  version "0.11.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.11.1.tgz#ba3611df5f0e5b1ffbfd01166b8f02d1f5fa2b99"
+
+mout@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-1.0.0.tgz#9bdf1d4af57d66d47cb353a6335a3281098e1501"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^2.2.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-2.3.0.tgz#4028f7778b17708a489930a6e52ac3bca0da41d0"
+
+mute-stream@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+mute-stream@0.0.5, mute-stream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.5.tgz#8fbfabb0a98a253d3184331f9e8deb7372fac6c0"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+node-gyp@~3.3.0:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.3.1.tgz#80f7b6d7c2f9c0495ba42c518a670c99bdf6e4a0"
+  dependencies:
+    fstream "^1.0.0"
+    glob "3 || 4"
+    graceful-fs "^4.1.2"
+    minimatch "1"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1 || 2"
+    osenv "0"
+    path-array "^1.0.0"
+    request "2"
+    rimraf "2"
+    semver "2.x || 3.x || 4 || 5"
+    tar "^2.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-modules-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8"
+
+node-notifier@^5.0.1:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.1.2.tgz#2fa9e12605fa10009d44549d6fcd8a63dde0e4ff"
+  dependencies:
+    growly "^1.3.0"
+    semver "^5.3.0"
+    shellwords "^0.1.0"
+    which "^1.2.12"
+
+node-status-codes@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/node-status-codes/-/node-status-codes-1.0.0.tgz#5ae5541d024645d32a58fcddc9ceecea7ae3ac2f"
+
+node-uuid@^1.4.3, node-uuid@~1.4.0, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@^3.0.3, nopt@~3.0.6:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+nopt@~1.0.10:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-git-info@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/npm-git-info/-/npm-git-info-1.0.3.tgz#a933c42ec321e80d3646e0d6e844afe94630e1d5"
+
+npm-install-checks@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@^4.1.1, npm-package-arg@~4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.1.1.tgz#86d9dca985b4c5e5d59772dfd5de6919998a495a"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-registry-client@~7.0.9:
+  version "7.0.9"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.0.9.tgz#1baf86ee5285c4e6d38d4556208ded56049231bb"
+  dependencies:
+    chownr "^1.0.1"
+    concat-stream "^1.4.6"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.0"
+    request "^2.47.0"
+    retry "^0.8.0"
+    rimraf "2"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0"
+
+npm-user-validate@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@2.14.21:
+  version "2.14.21"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.14.21.tgz#4be88073d5eb95864fc84c1df2c743bfdeded70e"
+  dependencies:
+    abbrev "~1.0.7"
+    ansi "~0.3.1"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.8"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.2"
+    columnify "~1.5.4"
+    config-chain "~1.1.10"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.7"
+    fs-write-stream-atomic "~1.0.8"
+    fstream "~1.0.8"
+    fstream-npm "~1.0.7"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~5.0.15"
+    graceful-fs "~4.1.3"
+    hosted-git-info "~2.1.4"
+    inflight "~1.0.4"
+    inherits "~2.0.1"
+    ini "~1.3.4"
+    init-package-json "~1.9.3"
+    lockfile "~1.0.1"
+    lru-cache "~3.2.0"
+    minimatch "~3.0.0"
+    mkdirp "~0.5.1"
+    node-gyp "~3.3.0"
+    nopt "~3.0.6"
+    normalize-git-url "~3.0.1"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.7"
+    npm-package-arg "~4.1.0"
+    npm-registry-client "~7.0.9"
+    npm-user-validate "~0.1.2"
+    npmlog "~2.0.2"
+    once "~1.3.3"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.3"
+    readable-stream "~1.1.13"
+    realize-package-specifier "~3.0.1"
+    request "~2.69.0"
+    retry "~0.9.0"
+    rimraf "~2.5.2"
+    semver "~5.1.0"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~1.0.0"
+    spdx-license-ids "~1.2.0"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.4"
+    wrappy "~1.0.1"
+    write-file-atomic "~1.1.4"
+
+"npmlog@0 || 1 || 2", "npmlog@0.1 || 1 || 2", npmlog@~2.0.0, npmlog@~2.0.2:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.6.0.tgz#7dbeae44f6ca454e1f168451d630746735813ce3"
+
+oauth-sign@~0.8.0:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0, once@^1.3.1, once@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+onetime@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/onetime/-/onetime-1.1.0.tgz#a1f7838f8314c516f05ecefcbc4ccfe04b4ed789"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+opn@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/opn/-/opn-1.0.2.tgz#b909643346d00a1abc977a8b96f3ce3c53d5cf5f"
+
+optimist@^0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+ora@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/ora/-/ora-0.2.3.tgz#37527d220adcd53c39b73571d754156d5db657a4"
+  dependencies:
+    chalk "^1.1.1"
+    cli-cursor "^1.0.2"
+    cli-spinners "^0.1.2"
+    object-assign "^4.0.1"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-name@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/os-name/-/os-name-1.0.3.tgz#1b379f64835af7c5a7f498b357cb95215c159edf"
+  dependencies:
+    osx-release "^1.0.0"
+    win-release "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@^0.1.3, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+osx-release@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/osx-release/-/osx-release-1.1.0.tgz#f217911a28136949af1bf9308b241e2737d3cd6c"
+  dependencies:
+    minimist "^1.1.0"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+p-throttler@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/p-throttler/-/p-throttler-0.1.1.tgz#15246409d225d3eefca85c50de710a83a78cca6a"
+  dependencies:
+    q "~0.9.2"
+
+package-json@^2.0.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/package-json/-/package-json-2.4.0.tgz#0d15bd67d1cbbddbb2ca222ff2edb86bcb31a8bb"
+  dependencies:
+    got "^5.0.0"
+    registry-auth-token "^3.0.1"
+    registry-url "^3.0.3"
+    semver "^5.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse-json@^2.1.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9"
+  dependencies:
+    error-ex "^1.2.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-array@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-array/-/path-array-1.0.1.tgz#7e2f0f35f07a2015122b868b7eac0eb2c4fec271"
+  dependencies:
+    array-index "^1.0.0"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+portfinder@^1.0.3:
+  version "1.0.13"
+  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.13.tgz#bb32ecd87c27104ae6ee44b5a3ccbf0ebb1aede9"
+  dependencies:
+    async "^1.5.2"
+    debug "^2.2.0"
+    mkdirp "0.5.x"
+
+prepend-http@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+printf@^0.2.3:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/printf/-/printf-0.2.5.tgz#c438ca2ca33e3927671db4ab69c0e52f936a4f0f"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+process-relative-require@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/process-relative-require/-/process-relative-require-1.0.0.tgz#1590dfcf5b8f2983ba53e398446b68240b4cc68a"
+  dependencies:
+    node-modules-path "^1.0.0"
+
+promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promptly@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/promptly/-/promptly-0.2.0.tgz#73ef200fa8329d5d3a8df41798950b8646ca46d9"
+  dependencies:
+    read "~1.0.4"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+pump@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.2.tgz#3b3ee6512f94f0e575538c17995f9f16990a5d51"
+  dependencies:
+    end-of-stream "^1.1.0"
+    once "^1.3.1"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+q@~0.9.2:
+  version "0.9.7"
+  resolved "https://registry.yarnpkg.com/q/-/q-0.9.7.tgz#4de2e6cb3b29088c9e4cbc03bf9d42fb96ce2f75"
+
+qs@5.2.0:
+  version "5.2.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.0.tgz#a9f31142af468cb72b25b30136ba2456834916be"
+
+qs@6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~2.3.1:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404"
+
+qs@~5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.1.0.tgz#4d932e5c7ea411cca76a312d39a606200fd50cd9"
+
+qs@~6.0.2:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.0.4.tgz#51019d84720c939b82737e84556a782338ecea7b"
+
+quick-temp@0.1.5, quick-temp@^0.1.0, quick-temp@^0.1.2, quick-temp@^0.1.3, quick-temp@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.5.tgz#0d0d67f0fb6a589a0e142f90985f76cdbaf403f7"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+qunitjs@^1.20.0:
+  version "1.23.1"
+  resolved "https://registry.yarnpkg.com/qunitjs/-/qunitjs-1.23.1.tgz#1971cf97ac9be01a64d2315508d2e48e6fd4e719"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.1.5:
+  version "2.1.7"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.1.7.tgz#adfeace2e4fb3098058014d08c072dcc59758774"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.13"
+    unpipe "1.0.0"
+
+rc@^1.0.1, rc@^1.1.6:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-all-stream@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/read-all-stream/-/read-all-stream-3.1.0.tgz#35c3e177f2078ef789ee4bfafa4373074eaef4fa"
+  dependencies:
+    pinkie-promise "^2.0.0"
+    readable-stream "^2.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read@1, read@~1.0.1, read@~1.0.4, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+"readable-stream@1 || 2", readable-stream@^2, readable-stream@^2.0.0, readable-stream@^2.0.2, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@1.1, readable-stream@^1.1.8, readable-stream@~1.1.13:
+  version "1.1.14"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@^2.0.5, readable-stream@^2.0.6, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2, readable-stream@~1.0.26:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+readline2@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-1.0.1.tgz#41059608ffc154757b715d9989d199ffbf372e35"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    mute-stream "0.0.5"
+
+realize-package-specifier@~3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17, recast@^0.11.3:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redeyed@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.4.4.tgz#37e990a6f2b21b2a11c2e6a48fd4135698cba97f"
+  dependencies:
+    esprima "~1.0.4"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+registry-auth-token@^3.0.1:
+  version "3.3.0"
+  resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-3.3.0.tgz#57ae67347e73d96345ed1bc01294c7237c02aa63"
+  dependencies:
+    rc "^1.1.6"
+    safe-buffer "^5.0.1"
+
+registry-url@^3.0.3:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942"
+  dependencies:
+    rc "^1.0.1"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request-progress@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-0.3.1.tgz#0721c105d8a96ac6b2ce8b2c89ae2d5ecfcf6b3a"
+  dependencies:
+    throttleit "~0.0.2"
+
+request-replay@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/request-replay/-/request-replay-0.2.0.tgz#9b693a5d118b39f5c596ead5ed91a26444057f60"
+  dependencies:
+    retry "~0.6.0"
+
+request@2, request@^2.47.0, request@~2.69.0:
+  version "2.69.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.69.0.tgz#cf91d2e000752b1217155c005241911991a2346a"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.6"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~6.0.2"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+request@2.53.0, request@^2.27.0, request@^2.40.0, request@^2.51.0:
+  version "2.53.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.53.0.tgz#180a3ae92b7b639802e4f9545dd8fcdeb71d760c"
+  dependencies:
+    aws-sign2 "~0.5.0"
+    bl "~0.9.0"
+    caseless "~0.9.0"
+    combined-stream "~0.0.5"
+    forever-agent "~0.5.0"
+    form-data "~0.2.0"
+    hawk "~2.3.0"
+    http-signature "~0.10.0"
+    isstream "~0.1.1"
+    json-stringify-safe "~5.0.0"
+    mime-types "~2.0.1"
+    node-uuid "~1.4.0"
+    oauth-sign "~0.6.0"
+    qs "~2.3.1"
+    stringstream "~0.0.4"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+restore-cursor@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-1.0.1.tgz#34661f46886327fed2991479152252df92daa541"
+  dependencies:
+    exit-hook "^1.0.0"
+    onetime "^1.0.0"
+
+retry@0.6.1, retry@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.6.1.tgz#fdc90eed943fde11b893554b8cc63d0e899ba918"
+
+retry@^0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f"
+
+retry@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.9.0.tgz#6f697e50a0e4ddc8c8f7fb547a9b60dead43678d"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.0, rimraf@^2.2.8, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.4.4, rimraf@^2.5.2, rimraf@^2.5.3, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rimraf@~2.4.3:
+  version "2.4.5"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da"
+  dependencies:
+    glob "^6.0.1"
+
+rimraf@~2.5.2:
+  version "2.5.4"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.5.4.tgz#96800093cbf1a0c86bd95b4625467535c29dfa04"
+  dependencies:
+    glob "^7.0.5"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.0.6:
+  version "3.0.21"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.0.21.tgz#49c588fe18ef293bcd0ab9f4e6756e6ac433359f"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+run-async@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/run-async/-/run-async-0.1.0.tgz#c8ad4a5e110661e402a7d21b530e009f25f8e389"
+  dependencies:
+    once "^1.3.0"
+
+rx-lite@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-3.1.2.tgz#19ce502ca572665f3b647b10939f97fd1615f102"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+semver-diff@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36"
+  dependencies:
+    semver "^5.0.3"
+
+semver-utils@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/semver-utils/-/semver-utils-1.1.1.tgz#27d92fec34d27cfa42707d3b40d025ae9855f2df"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.0.1, semver@^5.0.3, semver@^5.1.0, semver@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-2.3.2.tgz#b9848f25d6cf36333073ec9ef8856d42f1233e52"
+
+semver@^4.1.0, semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.1.0:
+  version "5.1.1"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.1.1.tgz#a3292a373e6f3e0798da0b20641b9a9c5bc47e19"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-command@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
+  dependencies:
+    shebang-regex "^1.0.0"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shell-quote@^1.4.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767"
+  dependencies:
+    array-filter "~0.0.0"
+    array-map "~0.0.0"
+    array-reduce "~0.0.0"
+    jsonify "~0.0.0"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+shellwords@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.0.tgz#66afd47b6a12932d9071cbfd98a52e785cd0ba14"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+silent-error@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.0.1.tgz#71b7d503d1c6f94882b51b56be879b113cb4822c"
+  dependencies:
+    debug "^2.2.0"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.6.0.tgz#5b668f4f771304dfeed179064708386fa6717853"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.0"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.6.0.tgz#3e40d932637e6bd923981b25caf7c53e83b6e2e1"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.0"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.6.0"
+    socket.io-parser "2.3.1"
+
+sorted-object@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-1.0.0.tgz#5d1f4f9c1fb2cd48965967304e212eb44cfb6d05"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.4.x, source-map@^0.4.2, source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+spawn-args@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/spawn-args/-/spawn-args-0.2.0.tgz#fb7d0bd1d70fd4316bd9e3dec389e65f9d6361bb"
+
+spawnback@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/spawnback/-/spawnback-1.0.0.tgz#f73662f7e54d95367eca74d6426c677dd7ea686f"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@^1.0.2, spdx-license-ids@~1.2.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sri-toolbox@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+statuses@1, "statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringify-object@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-1.0.1.tgz#86d35e7dbfbce9aa45637d7ecdd7847e159db8a2"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+sum-up@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sum-up/-/sum-up-1.0.3.tgz#1c661f667057f63bcb7875aa1438bc162525156e"
+  dependencies:
+    chalk "^1.0.0"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+tap-parser@^5.1.0:
+  version "5.3.3"
+  resolved "https://registry.yarnpkg.com/tap-parser/-/tap-parser-5.3.3.tgz#53ec8a90f275d6fff43f169e56a679502a741185"
+  dependencies:
+    events-to-array "^1.0.1"
+    js-yaml "^3.2.7"
+  optionalDependencies:
+    readable-stream "^2"
+
+tar-fs@^1.4.1:
+  version "1.15.2"
+  resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-1.15.2.tgz#761f5b32932c7b39461a60d537faea0d8084830c"
+  dependencies:
+    chownr "^1.0.1"
+    mkdirp "^0.5.1"
+    pump "^1.0.0"
+    tar-stream "^1.1.2"
+
+tar-stream@^1.1.2:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.5.2.tgz#fbc6c6e83c1a19d4cb48c7d96171fc248effc7bf"
+  dependencies:
+    bl "^1.0.0"
+    end-of-stream "^1.0.0"
+    readable-stream "^2.0.0"
+    xtend "^4.0.0"
+
+tar@^2.0.0, tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.3:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.3.tgz#e0c6bc4d26b903124410e4fed81103014dfc1f59"
+  dependencies:
+    os-tmpdir "^1.0.0"
+    rimraf "~2.2.6"
+
+testem@^1.6.0:
+  version "1.16.0"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-1.16.0.tgz#3933040b5d5b5fbdb6a2b1e7032e511b54a05867"
+  dependencies:
+    backbone "^1.1.2"
+    bluebird "^3.4.6"
+    charm "^1.0.0"
+    commander "^2.6.0"
+    consolidate "^0.14.0"
+    cross-spawn "^5.1.0"
+    express "^4.10.7"
+    fireworm "^0.7.0"
+    glob "^7.0.4"
+    http-proxy "^1.13.1"
+    js-yaml "^3.2.5"
+    lodash.assignin "^4.1.0"
+    lodash.clonedeep "^4.4.1"
+    lodash.find "^4.5.1"
+    lodash.uniqby "^4.7.0"
+    mkdirp "^0.5.1"
+    mustache "^2.2.1"
+    node-notifier "^5.0.1"
+    npmlog "^4.0.0"
+    printf "^0.2.3"
+    rimraf "^2.4.4"
+    socket.io "1.6.0"
+    spawn-args "^0.2.0"
+    styled_string "0.0.1"
+    tap-parser "^5.1.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+throttleit@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-0.0.2.tgz#cfedf88e60c00dd9697b61fdd2a8343a9b680eaf"
+
+through@^2.3.6, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+timed-out@^3.0.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-3.1.3.tgz#95860bfcc5c76c277f8f8326fd0f5b2e20eba217"
+
+tiny-lr@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.2.1.tgz#b3fdba802e5d56a33c2f6f10794b32e477ac729d"
+  dependencies:
+    body-parser "~1.14.0"
+    debug "~2.2.0"
+    faye-websocket "~0.10.0"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~5.1.0"
+
+tmp@0.0.24:
+  version "0.0.24"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.24.tgz#d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+touch@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/touch/-/touch-0.0.3.tgz#51aef3d449571d4f287a5d87c9c8b49181a0db1d"
+  dependencies:
+    nopt "~1.0.10"
+
+tough-cookie@>=0.12.0, tough-cookie@^2.0.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+"traverse@>=0.3.0 <0.4":
+  version "0.3.9"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.3.9.tgz#717b8f220cc0bb7b44e40514c22b2e8bbc70d8b9"
+
+tree-sync@^1.1.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/tree-sync/-/tree-sync-1.2.2.tgz#2cf76b8589f59ffedb58db5a3ac7cb013d0158b7"
+  dependencies:
+    debug "^2.2.0"
+    fs-tree-diff "^0.5.6"
+    mkdirp "^0.5.1"
+    quick-temp "^0.1.5"
+    walk-sync "^0.2.7"
+
+trim-right@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@~0.4.0, tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.6.10, type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0, uc.micro@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.6, uglify-js@^2.6.1, uglify-js@^2.7.0:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+untildify@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/untildify/-/untildify-2.1.0.tgz#17eb2807987f76952e9c0485fc311d06a826a2e0"
+  dependencies:
+    os-homedir "^1.0.0"
+
+unzip-response@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-1.0.2.tgz#b984f0877fc0a89c2c773cc1ef7b5b232b5b06fe"
+
+update-notifier@^0.6.0:
+  version "0.6.3"
+  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-0.6.3.tgz#776dec8daa13e962a341e8a1d98354306b67ae08"
+  dependencies:
+    boxen "^0.3.1"
+    chalk "^1.0.0"
+    configstore "^2.0.0"
+    is-npm "^1.0.0"
+    latest-version "^2.0.0"
+    semver-diff "^2.0.0"
+
+url-parse-lax@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73"
+  dependencies:
+    prepend-http "^1.0.1"
+
+user-home@^1.0.0, user-home@^1.1.0, user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.5, walk-sync@^0.2.6, walk-sync@^0.2.7:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.5.1:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which@1, which@^1.0.8, which@^1.2.12, which@^1.2.9, which@~1.2.4:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+widest-line@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-1.0.0.tgz#0c09c85c2a94683d0d7eaf8ee097d564bf0e105c"
+  dependencies:
+    string-width "^1.0.1"
+
+win-release@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/win-release/-/win-release-1.1.1.tgz#5fa55e02be7ca934edfc12665632e849b72e5209"
+  dependencies:
+    semver "^5.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrappy@1, wrappy@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@^1.1.2:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.3.4.tgz#f807a4f0b1d9e913ae7a48112e6cc3af1991b45f"
+  dependencies:
+    graceful-fs "^4.1.11"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+write-file-atomic@~1.1.4:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-1.0.1.tgz#14ff8f63a4fdbcb05d5b6eea22b36f3033b9f04e"
+  dependencies:
+    user-home "^1.0.0"
+
+xdg-basedir@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-2.0.0.tgz#edbc903cc385fc04523d966a335504b5504d1bd2"
+  dependencies:
+    os-homedir "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.18.tgz#e5cab771f0fc80ca599814cb9c269cb8bff00e2c"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.16.3"
+    lodash.merge "^3.0.2"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/jobs/pom.xml b/contrib/views/jobs/pom.xml
index 062f15f..ea8a527 100644
--- a/contrib/views/jobs/pom.xml
+++ b/contrib/views/jobs/pom.xml
@@ -86,29 +86,35 @@
             <plugin>
                 <groupId>com.github.eirslett</groupId>
                 <artifactId>frontend-maven-plugin</artifactId>
-                <version>1.3</version>
+                <version>1.4</version>
                 <configuration>
                     <nodeVersion>v4.5.0</nodeVersion>
-                    <npmVersion>2.15.0</npmVersion>
+                    <yarnVersion>v0.23.2</yarnVersion>
                     <workingDirectory>src/main/resources/ui</workingDirectory>
                     <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+                    <!-- Setting the npm_config_tmp environment variable is a workaround for
+                       https://github.com/Medium/phantomjs/issues/673 -->
+                    <environmentVariables>
+                      <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+                    </environmentVariables>
                 </configuration>
                 <executions>
                     <execution>
-                        <id>install node and npm</id>
+                        <id>install node and yarn</id>
                         <phase>generate-sources</phase>
                         <goals>
-                            <goal>install-node-and-npm</goal>
+                            <goal>install-node-and-yarn</goal>
                         </goals>
                     </execution>
                     <execution>
-                        <id>npm install</id>
+                        <id>yarn install --pure-lockfile</id>
                         <phase>generate-sources</phase>
                         <goals>
-                            <goal>npm</goal>
+                            <goal>yarn</goal>
                         </goals>
                         <configuration>
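+                            <!-- Both flags must go in a single <arguments> value: Maven binds
+                               this parameter to one String, and --ignore-engines skips yarn's
+                               "engines" compatibility check against the pinned Node version -->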
-                            <arguments>install --unsafe-perm</arguments>
+                            <arguments>install --unsafe-perm --ignore-engines</arguments>
                         </configuration>
                     </execution>
                 </executions>
diff --git a/contrib/views/jobs/src/main/resources/ui/yarn.lock b/contrib/views/jobs/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..ce873c1
--- /dev/null
+++ b/contrib/views/jobs/src/main/resources/ui/yarn.lock
@@ -0,0 +1,2537 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1, abbrev@~1.0.4:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-0.2.0.tgz#359ab4b15dcd64ba6d74734b72c36360a9af2c19"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+archy@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-0.0.2.tgz#910f43bf66141fc335564597abc189df44b3d35e"
+
+argparse@^1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+"argparse@~ 0.1.11":
+  version "0.1.16"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-0.1.16.tgz#cfd01e0fbba3d6caed049fbd758d40f65196f57c"
+  dependencies:
+    underscore "~1.7.0"
+    underscore.string "~2.4.0"
+
+array-filter@~0.0.0:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec"
+
+array-map@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662"
+
+array-reduce@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+async@^0.2.10, async@~0.2.6, async@~0.2.8, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.1.22:
+  version "0.1.22"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.1.22.tgz#0fc1aaa088a0e3ef0ebe2d8831bab0dcf8845061"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+async@~0.9.0:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.2.tgz#aea74d5e61c1f899613bf64bda66d4c78f2fd17d"
+
+aws-sign2@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.5.0.tgz#c57103f7a17fc037f02d7c2e64b602ea223f7d63"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws-sign@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/aws-sign/-/aws-sign-0.3.0.tgz#3d81ca69b474b1e16518728b51c24ff0bbedc6e9"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+binary@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/binary/-/binary-0.3.0.tgz#9f60553bc5ce8c3386f3b553cff47462adecaa79"
+  dependencies:
+    buffers "~0.1.1"
+    chainsaw "~0.1.0"
+
+bl@~0.9.0:
+  version "0.9.5"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-0.9.5.tgz#c06b797af085ea00bc527afc8efcf11de2232054"
+  dependencies:
+    readable-stream "~1.0.26"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+block-stream@*:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+boom@0.4.x:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-0.4.2.tgz#7a636e9ded4efcefb19cef4947a3c67dfaee911b"
+  dependencies:
+    hoek "0.9.x"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@~0.5.0, bower-config@~0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.5.2.tgz#1f7d2e899e99b70c29a613e70d4c64590414b22e"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+bower-endpoint-parser@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-json@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/bower-json/-/bower-json-0.4.0.tgz#a99c3ccf416ef0590ed0ded252c760f1c6d93766"
+  dependencies:
+    deep-extend "~0.2.5"
+    graceful-fs "~2.0.0"
+    intersect "~0.0.3"
+
+bower-logger@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-logger/-/bower-logger-0.2.2.tgz#39be07e979b2fc8e03a94634205ed9422373d381"
+
+bower-registry-client@~0.2.0:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/bower-registry-client/-/bower-registry-client-0.2.4.tgz#269fc7e898b627fb939d1144a593254d7fbbeebc"
+  dependencies:
+    async "~0.2.8"
+    bower-config "~0.5.0"
+    graceful-fs "~2.0.0"
+    lru-cache "~2.3.0"
+    mkdirp "~0.3.5"
+    request "~2.51.0"
+    request-replay "~0.2.0"
+    rimraf "~2.2.0"
+
+bower@1.3.8:
+  version "1.3.8"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.3.8.tgz#afa3338a8a88a6e084c38112ea4a15998cbee3e6"
+  dependencies:
+    abbrev "~1.0.4"
+    archy "~0.0.2"
+    bower-config "~0.5.2"
+    bower-endpoint-parser "~0.2.2"
+    bower-json "~0.4.0"
+    bower-logger "~0.2.2"
+    bower-registry-client "~0.2.0"
+    cardinal "~0.4.0"
+    chalk "~0.4.0"
+    chmodr "~0.1.0"
+    decompress-zip "~0.0.6"
+    fstream "~0.1.22"
+    fstream-ignore "~0.0.6"
+    glob "~4.0.2"
+    graceful-fs "~3.0.1"
+    handlebars "~1.3.0"
+    inquirer "~0.5.1"
+    insight "~0.3.0"
+    is-root "~0.1.0"
+    junk "~0.3.0"
+    lockfile "~0.4.2"
+    lru-cache "~2.5.0"
+    mkdirp "~0.5.0"
+    mout "~0.9.1"
+    nopt "~3.0.0"
+    opn "~0.1.1"
+    osenv "~0.1.0"
+    p-throttler "~0.0.1"
+    promptly "~0.2.0"
+    q "~1.0.1"
+    request "~2.36.0"
+    request-progress "~0.3.0"
+    retry "~0.6.0"
+    rimraf "~2.2.0"
+    semver "~2.3.0"
+    shell-quote "~1.4.1"
+    stringify-object "~0.2.0"
+    tar "~0.1.17"
+    tmp "0.0.23"
+    update-notifier "~0.2.0"
+    which "~1.0.5"
+
+buffer-crc32@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.1.tgz#be3e5382fc02b6d6324956ac1af98aa98b08534c"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+buffers@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/buffers/-/buffers-0.1.1.tgz#b24579c3bed4d6d396aeee6d9a8ae7f5482ab7bb"
+
+bytes@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-0.2.0.tgz#aad33ec14e3dc2ca74e8e7d451f9ba053ad4f7a0"
+
+camelcase@^1.0.2:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+cardinal@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.4.4.tgz#ca5bb68a5b511b90fe93b9acea49bdee5c32bfe2"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.4.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.8.0.tgz#5bca2881d41437f54b2407ebe34888c7b9ad4f7d"
+
+chainsaw@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/chainsaw/-/chainsaw-0.1.0.tgz#5eab50b28afe58074d0d58291388828b5e5fbc98"
+  dependencies:
+    traverse ">=0.3.0 <0.4"
+
+chalk@^0.4.0, chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.3.0.tgz#1c98437737f1199ebcc1d4c48fd41b9f9c8e8f23"
+  dependencies:
+    ansi-styles "~0.2.0"
+    has-color "~0.1.0"
+
+chmodr@~0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-0.1.2.tgz#0dd8041c915087575bec383b47827bb7576a4fd6"
+
+clean-css@2.2.x:
+  version "2.2.23"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.2.23.tgz#0590b5478b516c4903edc2d89bd3fdbdd286328c"
+  dependencies:
+    commander "2.2.x"
+
+clean-css@~1.1.1:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-1.1.7.tgz#601ef9cf7642b982cb33efc9488a6444c986686e"
+  dependencies:
+    commander "2.0.x"
+
+cli-color@~0.2.2:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.2.3.tgz#0a25ceae5a6a1602be7f77d28563c36700274e88"
+  dependencies:
+    es5-ext "~0.9.2"
+    memoizee "~0.2.5"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli@0.4.x:
+  version "0.4.5"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-0.4.5.tgz#78f9485cd161b566e9a6c72d7170c4270e81db61"
+  dependencies:
+    glob ">= 3.1.4"
+
+coa@~0.3.7:
+  version "0.3.9"
+  resolved "https://registry.yarnpkg.com/coa/-/coa-0.3.9.tgz#7e3d20d30af70b80862e95d4d49b715183be9604"
+  dependencies:
+    q "~0.8.10"
+
+coffee-script@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.3.3.tgz#150d6b4cb522894369efed6a2101c20bc7f4a4f4"
+
+colors@~0.6.0, colors@~0.6.2:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+combined-stream@~0.0.4, combined-stream@~0.0.5:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-0.0.7.tgz#0137e657baa5a7541c57ac37ac5fc07d73b4dc1f"
+  dependencies:
+    delayed-stream "0.0.5"
+
+commander@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-0.6.1.tgz#fa68a14f6a945d54dbbe50d8cdb3320e9e3b1a06"
+
+commander@2.0.x:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.0.0.tgz#d1b86f901f8b64bd941bdeadaf924530393be928"
+
+commander@2.2.x:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.2.0.tgz#175ad4b9317f3ff615f201c1e57224f55a3e91df"
+
+commander@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.3.0.tgz#fd430e889832ec353b9acd1de217c11cb3eef873"
+
+commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+concat-stream@1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+concat-stream@^1.4.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+configstore@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.3.2.tgz#25e4c16c3768abf75c5a65bc61761f495055b459"
+  dependencies:
+    graceful-fs "^3.0.1"
+    js-yaml "^3.1.0"
+    mkdirp "^0.5.0"
+    object-assign "^2.0.0"
+    osenv "^0.1.0"
+    user-home "^1.0.0"
+    uuid "^2.0.1"
+    xdg-basedir "^1.0.0"
+
+configstore@~0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-0.2.3.tgz#b1bdc4ad823a25423dc15d220fcc1ae1d7efab02"
+  dependencies:
+    graceful-fs "~2.0.1"
+    js-yaml "~3.0.1"
+    mkdirp "~0.3.5"
+    object-assign "~0.1.1"
+    osenv "0.0.3"
+    uuid "~1.4.1"
+
+connect-livereload@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/connect-livereload/-/connect-livereload-0.2.0.tgz#7573cf587846dffd02a3e65e3122b470dd615ec9"
+
+connect@~2.7.3:
+  version "2.7.11"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-2.7.11.tgz#f561d5eef70b8d719c397f724d34ba4065c77a3e"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    bytes "0.2.0"
+    cookie "0.0.5"
+    cookie-signature "1.0.1"
+    debug "*"
+    formidable "1.0.14"
+    fresh "0.1.0"
+    pause "0.0.1"
+    qs "0.6.5"
+    send "0.1.1"
+
+console-browserify@0.1.x:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-0.1.6.tgz#d128a3c0bb88350eb5626c6e7c71a6f0fd48983c"
+
+cookie-jar@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/cookie-jar/-/cookie-jar-0.3.0.tgz#bc9a27d4e2b97e186cd57c9e2063cb99fa68cccc"
+
+cookie-signature@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.1.tgz#44e072148af01e6e8e24afbf12690d68ae698ecb"
+
+cookie@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.0.5.tgz#f9acf9db57eb7568c9fcc596256b7bb22e307c81"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cryptiles@0.2.x:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-0.2.2.tgz#ed91ff1f17ad13d3748288594f8a48a0d26f325c"
+  dependencies:
+    boom "0.4.x"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+dateformat@1.0.2-1.2.3:
+  version "1.0.2-1.2.3"
+  resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-1.0.2-1.2.3.tgz#b0220c02de98617433b72851cf47de3df2cdbee9"
+
+debug@*:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debug@0.7.4, debug@~0.7.0:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.0.0.tgz#89bd9df6732b51256bc6705342bba02ed12131ef"
+  dependencies:
+    ms "0.6.2"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+decompress-zip@~0.0.6:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/decompress-zip/-/decompress-zip-0.0.8.tgz#4a265b22c7b209d7b24fa66f2b2dfbced59044f3"
+  dependencies:
+    binary "~0.3.0"
+    graceful-fs "~3.0.0"
+    mkpath "~0.1.0"
+    nopt "~2.2.0"
+    q "~1.0.0"
+    readable-stream "~1.1.8"
+    touch "0.0.2"
+
+deep-equal@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-0.0.0.tgz#99679d3bbd047156fcd450d3d01eeb9068691e83"
+
+deep-extend@~0.2.5:
+  version "0.2.11"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.2.11.tgz#7a16ba69729132340506170494bc83f7076fe08f"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+defined@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-0.0.0.tgz#f35eea7d705e933baf13b2f03b3f83d921403b3e"
+
+delayed-stream@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-0.0.5.tgz#d4b1f43a93e8296dfe02694f4680bc37a313c73f"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+diff@1.0.8:
+  version "1.0.8"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.0.8.tgz#343276308ec991b7bc82267ed55bc1411f971666"
+
+duplexify@^3.2.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.5.0.tgz#1aa773002e1578457e9d9d4a50b0ccaaebcbd604"
+  dependencies:
+    end-of-stream "1.0.0"
+    inherits "^2.0.1"
+    readable-stream "^2.0.0"
+    stream-shift "^1.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+end-of-stream@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.0.0.tgz#d4596e702734a93e40e9af864319eabd99ff2f0e"
+  dependencies:
+    once "~1.3.0"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es5-ext@~0.9.2:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.9.2.tgz#d2e309d1f223b0718648835acf5b8823a8061f8a"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-string-regexp@1.0.2, escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.2.tgz#4dbc2fe674e71949caf3fb2695ce7f2dc1d9a8d1"
+
+esprima@^3.1.1:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+"esprima@~ 1.0.2", esprima@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.0.4.tgz#9f557e08fc3b4d26ece9dd34f8fbf476b62585ad"
+
+event-emitter@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.2.2.tgz#c81e3724eb55407c5a0d5ee3299411f700f54291"
+  dependencies:
+    es5-ext "~0.9.2"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter2@^0.4.9, eventemitter2@~0.4.13:
+  version "0.4.14"
+  resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-0.4.14.tgz#8f61b75cde012b2e9eb284d4545583b5643b61ab"
+
+exit@~0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+faye-websocket@~0.4.3:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.4.4.tgz#c14c5b3bf14d7417ffbfd990c0a7495cd9f337bc"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filesize@~2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/filesize/-/filesize-2.0.4.tgz#7805941c60fcdfe63f46d7ea358c59ade11c1325"
+
+findup-sync@~0.1.0, findup-sync@~0.1.2:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.1.3.tgz#7f3e7a97b82392c653bf06589bd85190e93c3683"
+  dependencies:
+    glob "~3.2.9"
+    lodash "~2.4.1"
+
+forever-agent@~0.5.0:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.5.2.tgz#6d0e09c4921f94a27f63d3b49c5feff1ea4c5130"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~0.1.0:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.1.4.tgz#91abd788aba9702b1aabfa8bc01031a2ac9e3b12"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime "~1.2.11"
+
+form-data@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-0.2.0.tgz#26f8bc26da6440e299cbdcfb69035c4f77a6e466"
+  dependencies:
+    async "~0.9.0"
+    combined-stream "~0.0.4"
+    mime-types "~2.0.3"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+formidable@1.0.14:
+  version "1.0.14"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.14.tgz#2b3f4c411cbb5fdd695c44843e2a23514a43231a"
+
+fresh@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.1.0.tgz#03e4b0178424e4c2d5d19a54d8814cdc97934850"
+
+fs-extra@~0.26.4:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fstream-ignore@~0.0.6:
+  version "0.0.10"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-0.0.10.tgz#b10f8f522cc55415f80b41f7d3a32e6cba254e8c"
+  dependencies:
+    fstream "~0.1.17"
+    inherits "2"
+    minimatch "^0.3.0"
+
+fstream@~0.1.17, fstream@~0.1.22, fstream@~0.1.28:
+  version "0.1.31"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-0.1.31.tgz#7337f058fbbbbefa8c9f561a28cab0849202c988"
+  dependencies:
+    graceful-fs "~3.0.2"
+    inherits "~2.0.0"
+    mkdirp "0.5"
+    rimraf "2"
+
+gaze@~0.4.0:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/gaze/-/gaze-0.4.3.tgz#e538f4ff5e4fe648f473a97e1ebb253d2de127b5"
+  dependencies:
+    globule "~0.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+getobject@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/getobject/-/getobject-0.1.0.tgz#047a449789fa160d018f5486ed91320b6ec7885c"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+glob@3.2.3:
+  version "3.2.3"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.3.tgz#e313eeb249c7affaa5c475286b0e115b59839467"
+  dependencies:
+    graceful-fs "~2.0.0"
+    inherits "2"
+    minimatch "~0.2.11"
+
+"glob@>= 3.1.4", glob@~4.0.2:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.0.6.tgz#695c50bdd4e2fb5c5d370b091f388d3707e291a7"
+  dependencies:
+    graceful-fs "^3.0.2"
+    inherits "2"
+    minimatch "^1.0.0"
+    once "^1.3.0"
+
+glob@~3.1.21:
+  version "3.1.21"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.1.21.tgz#d29e0a055dea5138f4d07ed40e8982e83c2066cd"
+  dependencies:
+    graceful-fs "~1.2.0"
+    inherits "1"
+    minimatch "~0.2.11"
+
+glob@~3.2.3, glob@~3.2.9:
+  version "3.2.11"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-3.2.11.tgz#4a973f635b9190f715d10987d5c00fd2815ebe3d"
+  dependencies:
+    inherits "2"
+    minimatch "0.3"
+
+globule@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/globule/-/globule-0.1.0.tgz#d9c8edde1da79d125a151b79533b978676346ae5"
+  dependencies:
+    glob "~3.1.21"
+    lodash "~1.0.1"
+    minimatch "~0.2.11"
+
+got@^3.2.0:
+  version "3.3.1"
+  resolved "https://registry.yarnpkg.com/got/-/got-3.3.1.tgz#e5d0ed4af55fc3eef4d56007769d98192bcb2eca"
+  dependencies:
+    duplexify "^3.2.0"
+    infinity-agent "^2.0.0"
+    is-redirect "^1.0.0"
+    is-stream "^1.0.0"
+    lowercase-keys "^1.0.0"
+    nested-error-stacks "^1.0.0"
+    object-assign "^3.0.0"
+    prepend-http "^1.0.0"
+    read-all-stream "^3.0.0"
+    timed-out "^2.0.0"
+
+graceful-fs@^3.0.1, graceful-fs@^3.0.2, graceful-fs@~3.0.0, graceful-fs@~3.0.1, graceful-fs@~3.0.2:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~1.2.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-1.2.3.tgz#15a4806a57547cb2d2dbf27f42e89a8c3451b364"
+
+graceful-fs@~2.0.0, graceful-fs@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.8.1.tgz#4b2dec8d907e93db336624dcec0183502f8c9428"
+
+grunt-cli@~0.1.13:
+  version "0.1.13"
+  resolved "https://registry.yarnpkg.com/grunt-cli/-/grunt-cli-0.1.13.tgz#e9ebc4047631f5012d922770c39378133cad10f4"
+  dependencies:
+    findup-sync "~0.1.0"
+    nopt "~1.0.10"
+    resolve "~0.3.1"
+
+grunt-concurrent@~0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/grunt-concurrent/-/grunt-concurrent-0.3.1.tgz#0ceed6add526cc63f87fa40e90287988d9e17a8e"
+  dependencies:
+    lpad "~0.1.0"
+
+grunt-contrib-clean@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-clean/-/grunt-contrib-clean-0.5.0.tgz#f53dfdee0849b1c7b40e9ebbba69f48c4c6079c5"
+  dependencies:
+    rimraf "~2.2.1"
+
+grunt-contrib-concat@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-concat/-/grunt-contrib-concat-0.3.0.tgz#48fa0d4336d29b653ad8225a6bd6f856b4483e32"
+
+grunt-contrib-connect@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-connect/-/grunt-contrib-connect-0.3.0.tgz#1fa25353f1ad80b792288758235faff98b583ca5"
+  dependencies:
+    connect "~2.7.3"
+
+grunt-contrib-copy@~0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-copy/-/grunt-contrib-copy-0.4.1.tgz#f0753b40ae21bb706daefb0b299e03cdf5fa9d6e"
+
+grunt-contrib-cssmin@~0.6.0:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-cssmin/-/grunt-contrib-cssmin-0.6.2.tgz#2804dc0e81f98e8a54d61eee84a1d3fe1a3af8e2"
+  dependencies:
+    clean-css "~1.1.1"
+    grunt-lib-contrib "~0.6.0"
+
+grunt-contrib-htmlmin@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-htmlmin/-/grunt-contrib-htmlmin-0.1.3.tgz#410deb7f2905402c1034b92e7d3cce4a8cc0246e"
+  dependencies:
+    grunt-lib-contrib "~0.6.1"
+    html-minifier "~0.5.0"
+
+grunt-contrib-jshint@~0.6.3:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-jshint/-/grunt-contrib-jshint-0.6.5.tgz#3afb4676745364cc4a19eee7934c0e06008b566e"
+  dependencies:
+    jshint "~2.1.10"
+
+grunt-contrib-less@~0.11:
+  version "0.11.4"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-less/-/grunt-contrib-less-0.11.4.tgz#5667475ac4517f32ca623b9a4d81d6cf4aed2b51"
+  dependencies:
+    async "^0.2.10"
+    chalk "^0.5.1"
+    less "^1.7.2"
+    lodash "^2.4.1"
+    maxmin "^0.1.0"
+
+grunt-contrib-uglify@~0.2.0:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-uglify/-/grunt-contrib-uglify-0.2.7.tgz#e6bda51e0c40a1459f6cead423c65efd725a1bf7"
+  dependencies:
+    grunt-lib-contrib "~0.6.1"
+    uglify-js "~2.4.0"
+
+grunt-contrib-watch@~0.5.2:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/grunt-contrib-watch/-/grunt-contrib-watch-0.5.3.tgz#7d9eb5465d506fa14faaca47e6e8790a82c1c9ee"
+  dependencies:
+    gaze "~0.4.0"
+    tiny-lr "0.0.4"
+
+grunt-ember-templates@0.4.14:
+  version "0.4.14"
+  resolved "https://registry.yarnpkg.com/grunt-ember-templates/-/grunt-ember-templates-0.4.14.tgz#550dbccf651146a32071782212f68106bb591c51"
+
+grunt-legacy-log-utils@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/grunt-legacy-log-utils/-/grunt-legacy-log-utils-0.1.1.tgz#c0706b9dd9064e116f36f23fe4e6b048672c0f7e"
+  dependencies:
+    colors "~0.6.2"
+    lodash "~2.4.1"
+    underscore.string "~2.3.3"
+
+grunt-legacy-log@~0.1.0:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/grunt-legacy-log/-/grunt-legacy-log-0.1.3.tgz#ec29426e803021af59029f87d2f9cd7335a05531"
+  dependencies:
+    colors "~0.6.2"
+    grunt-legacy-log-utils "~0.1.1"
+    hooker "~0.2.3"
+    lodash "~2.4.1"
+    underscore.string "~2.3.3"
+
+grunt-legacy-util@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/grunt-legacy-util/-/grunt-legacy-util-0.2.0.tgz#93324884dbf7e37a9ff7c026dff451d94a9e554b"
+  dependencies:
+    async "~0.1.22"
+    exit "~0.1.1"
+    getobject "~0.1.0"
+    hooker "~0.2.3"
+    lodash "~0.9.2"
+    underscore.string "~2.2.1"
+    which "~1.0.5"
+
+grunt-lib-contrib@~0.5.1:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/grunt-lib-contrib/-/grunt-lib-contrib-0.5.3.tgz#e83f9ee5c8a06592d6e825821d91210782081338"
+  dependencies:
+    zlib-browserify "0.0.1"
+
+grunt-lib-contrib@~0.6.0, grunt-lib-contrib@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/grunt-lib-contrib/-/grunt-lib-contrib-0.6.1.tgz#3f56adb7da06e814795ee2415b0ebe5fb8903ebb"
+  dependencies:
+    zlib-browserify "0.0.1"
+
+grunt-lib-phantomjs@^0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/grunt-lib-phantomjs/-/grunt-lib-phantomjs-0.7.1.tgz#a496ac104bc8e842e26893749d54c4905475e8fc"
+  dependencies:
+    eventemitter2 "^0.4.9"
+    phantomjs "^1.9.15"
+    semver "^4.3.0"
+    temporary "^0.0.8"
+
+grunt-mocha@~0.4.1:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/grunt-mocha/-/grunt-mocha-0.4.15.tgz#af584712f467fb2a216b0c04863061da4838e123"
+  dependencies:
+    grunt-lib-phantomjs "^0.7.1"
+    lodash "^3.9.0"
+    mocha "^1.21.5"
+
+grunt-neuter@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/grunt-neuter/-/grunt-neuter-0.6.0.tgz#87f87ed7582a7536a49a0b74eda39e79b89cb1b1"
+  dependencies:
+    glob "~3.2.3"
+    source-map "~0.1.22"
+
+grunt-open@~0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/grunt-open/-/grunt-open-0.2.3.tgz#145ac45026a57fcfaa433ffd7398ae46d2bd3957"
+  dependencies:
+    open "~0.0.4"
+
+grunt-replace@~0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/grunt-replace/-/grunt-replace-0.4.4.tgz#18df9a330b9ed99c67dd0e5af03381ef3f0d0df5"
+  dependencies:
+    grunt-lib-contrib "~0.5.1"
+
+grunt-rev@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/grunt-rev/-/grunt-rev-0.1.0.tgz#b32427e7d842345839709343a58c387361694947"
+
+grunt-svgmin@~0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/grunt-svgmin/-/grunt-svgmin-0.2.1.tgz#fedd0ab15e2d1805b89f594568ced01642f338dc"
+  dependencies:
+    chalk "~0.3.0"
+    filesize "~2.0.0"
+    svgo "~0.3.1"
+
+grunt-usemin@~0.1.12:
+  version "0.1.13"
+  resolved "https://registry.yarnpkg.com/grunt-usemin/-/grunt-usemin-0.1.13.tgz#656e97a9ed1a5297fbd426b4d5847a76aa229731"
+
+grunt@~0.4.1:
+  version "0.4.5"
+  resolved "https://registry.yarnpkg.com/grunt/-/grunt-0.4.5.tgz#56937cd5194324adff6d207631832a9d6ba4e7f0"
+  dependencies:
+    async "~0.1.22"
+    coffee-script "~1.3.3"
+    colors "~0.6.2"
+    dateformat "1.0.2-1.2.3"
+    eventemitter2 "~0.4.13"
+    exit "~0.1.1"
+    findup-sync "~0.1.2"
+    getobject "~0.1.0"
+    glob "~3.1.21"
+    grunt-legacy-log "~0.1.0"
+    grunt-legacy-util "~0.2.0"
+    hooker "~0.2.3"
+    iconv-lite "~0.2.11"
+    js-yaml "~2.0.5"
+    lodash "~0.9.2"
+    minimatch "~0.2.12"
+    nopt "~1.0.10"
+    rimraf "~2.2.8"
+    underscore.string "~2.2.1"
+    which "~1.0.5"
+
+gzip-size@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-0.1.1.tgz#ae33483b6fc8224e8342296de108ef93757f76e0"
+  dependencies:
+    concat-stream "^1.4.1"
+    zlib-browserify "^0.0.3"
+
+handlebars@~1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-1.3.0.tgz#9e9b130a93e389491322d975cf3ec1818c37ce34"
+  dependencies:
+    optimist "~0.3"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+har-validator@~2.0.2:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+hasha@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-1.1.1.tgz#87cd491f9b46e4e2aeaca335416766885d2d1ed9"
+  dependencies:
+    boom "0.4.x"
+    cryptiles "0.2.x"
+    hoek "0.9.x"
+    sntp "0.2.x"
+
+hawk@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-1.0.0.tgz#b90bb169807285411da7ffcb8dd2598502d3b52d"
+  dependencies:
+    boom "0.4.x"
+    cryptiles "0.2.x"
+    hoek "0.9.x"
+    sntp "0.2.x"
+
+hawk@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hoek@0.9.x:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-0.9.1.tgz#3d322462badf07716ea7eb85baf88079cddce505"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+hooker@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/hooker/-/hooker-0.2.3.tgz#b834f723cc4a242aa65963459df6d984c5d3d959"
+
+html-minifier@~0.5.0:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-0.5.6.tgz#0f15b437c27b5ce9aa84a44ca2850880e9257996"
+
+http-signature@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.10.1.tgz#4fbdac132559aa8323121e540779c0a012b27e66"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@~0.2.11:
+  version "0.2.11"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.2.11.tgz#1ce60a3a57864a292d1321ff4609ca4bb965adc8"
+
+infinity-agent@^2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/infinity-agent/-/infinity-agent-2.0.3.tgz#45e0e2ff7a9eb030b27d62b74b3744b7a7ac4216"
+
+inherits@1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-1.0.2.tgz#ca4309dadee6b54cc0b8d247e8d7c7a0975bdc9b"
+
+inherits@2, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@~1.3.0:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+inquirer@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.4.1.tgz#6cf74eb1a347f97a1a207bea8ad1c987d0ff4b81"
+  dependencies:
+    async "~0.2.8"
+    cli-color "~0.2.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+inquirer@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.5.1.tgz#e9f2cd1ee172c7a32e054b78a03d4ddb0d7707f1"
+  dependencies:
+    async "~0.8.0"
+    chalk "~0.4.0"
+    cli-color "~0.3.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+insight@~0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/insight/-/insight-0.3.1.tgz#1a14f32c06115c0850338c38a253d707b611d448"
+  dependencies:
+    async "~0.2.9"
+    chalk "~0.4.0"
+    configstore "~0.2.1"
+    inquirer "~0.4.0"
+    lodash.debounce "~2.4.1"
+    object-assign "~0.1.2"
+    request "~2.27.0"
+
+intersect@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/intersect/-/intersect-0.0.3.tgz#c1a4a5e5eac6ede4af7504cc07e0ada7bc9f4920"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-npm@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-redirect@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24"
+
+is-root@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/is-root/-/is-root-0.1.0.tgz#825e394ab593df2d73c5d0092fce507270b53dcb"
+
+is-stream@^1.0.0, is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+jade@0.26.3:
+  version "0.26.3"
+  resolved "https://registry.yarnpkg.com/jade/-/jade-0.26.3.tgz#8f10d7977d8d79f2f6ff862a81b0513ccb25686c"
+  dependencies:
+    commander "0.6.1"
+    mkdirp "0.3.0"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-yaml@, js-yaml@~2.0.5:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-2.0.5.tgz#a25ae6509999e97df278c6719da11bd0687743a8"
+  dependencies:
+    argparse "~ 0.1.11"
+    esprima "~ 1.0.2"
+
+js-yaml@^3.1.0:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+js-yaml@~3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.0.2.tgz#9937865f8e897a5e894e73c2c5cf2e89b32eb771"
+  dependencies:
+    argparse "~ 0.1.11"
+    esprima "~ 1.0.2"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jshint-stylish@~0.1.3:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/jshint-stylish/-/jshint-stylish-0.1.5.tgz#d41b6eef8e06a4ddfb36540bf6593fe31b987236"
+  dependencies:
+    chalk "~0.4.0"
+    text-table "~0.2.0"
+
+jshint@~2.1.10:
+  version "2.1.11"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.1.11.tgz#eb5108fef9ba5ddebb830983f572d242e49e3f96"
+  dependencies:
+    cli "0.4.x"
+    console-browserify "0.1.x"
+    minimatch "0.x.x"
+    shelljs "0.1.x"
+    underscore "1.4.x"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stringify-safe@~5.0.0, json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+junk@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/junk/-/junk-0.3.0.tgz#6c89c636f6e99898d8efbfc50430db40be71e10c"
+
+kew@~0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/kew/-/kew-0.7.0.tgz#79d93d2d33363d6fdd2970b335d9141ad591d79b"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+latest-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-1.0.1.tgz#72cfc46e3e8d1be651e1ebb54ea9f6ea96f374bb"
+  dependencies:
+    package-json "^1.0.0"
+
+less@^1.7.2:
+  version "1.7.5"
+  resolved "https://registry.yarnpkg.com/less/-/less-1.7.5.tgz#4f220cf7288a27eaca739df6e4808a2d4c0d5756"
+  optionalDependencies:
+    clean-css "2.2.x"
+    graceful-fs "~3.0.2"
+    mime "~1.2.11"
+    mkdirp "~0.5.0"
+    request "~2.40.0"
+    source-map "0.1.x"
+
+load-grunt-tasks@~0.1.0:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/load-grunt-tasks/-/load-grunt-tasks-0.1.3.tgz#9a1e42d5405b7b7cab43f93d47767dfd0af39851"
+  dependencies:
+    lodash "~2.2.1"
+    minimatch "~0.2.12"
+
+lockfile@~0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-0.4.3.tgz#79b965ee9b32d9dd24b59cf81205e6dcb6d3b224"
+
+lodash._isnative@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash._isnative/-/lodash._isnative-2.4.1.tgz#3ea6404b784a7be836c7b57580e1cdf79b14832c"
+
+lodash._objecttypes@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash._objecttypes/-/lodash._objecttypes-2.4.1.tgz#7c0b7f69d98a1f76529f890b0cdb1b4dfec11c11"
+
+lodash.debounce@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-2.4.1.tgz#d8cead246ec4b926e8b85678fc396bfeba8cc6fc"
+  dependencies:
+    lodash.isfunction "~2.4.1"
+    lodash.isobject "~2.4.1"
+    lodash.now "~2.4.1"
+
+lodash.isfunction@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash.isfunction/-/lodash.isfunction-2.4.1.tgz#2cfd575c73e498ab57e319b77fa02adef13a94d1"
+
+lodash.isobject@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash.isobject/-/lodash.isobject-2.4.1.tgz#5a2e47fe69953f1ee631a7eba1fe64d2d06558f5"
+  dependencies:
+    lodash._objecttypes "~2.4.1"
+
+lodash.now@~2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash.now/-/lodash.now-2.4.1.tgz#6872156500525185faf96785bb7fe7fe15b562c6"
+  dependencies:
+    lodash._isnative "~2.4.1"
+
+lodash@^2.4.1, lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+lodash@^3.9.0:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.14.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~0.9.2:
+  version "0.9.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-0.9.2.tgz#8f3499c5245d346d682e5b0d3b40767e09f1a92c"
+
+lodash@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-1.0.2.tgz#8f57560c83b59fc270bd3d561b690043430e2551"
+
+lodash@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.2.1.tgz#ca935fd14ab3c0c872abacf198b9cda501440867"
+
+lowercase-keys@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306"
+
+lpad@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lpad/-/lpad-0.1.0.tgz#e4c60c29139321c5970de493b496ae0d774cd2a7"
+
+lru-cache@2, lru-cache@~2.5.0:
+  version "2.5.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.5.2.tgz#1fddad938aae1263ce138680be1b3f591c0ab41c"
+
+lru-cache@~2.3.0:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.3.1.tgz#b3adf6b3d856e954e2c390e6cef22081245a53d6"
+
+lru-queue@0.1:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lru-queue/-/lru-queue-0.1.0.tgz#2738bd9f0d3cf4f84490c5736c48699ac632cda3"
+  dependencies:
+    es5-ext "~0.10.2"
+
+maxmin@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/maxmin/-/maxmin-0.1.0.tgz#95d81c5289e3a9d30f7fc7dc559c024e5030c9d0"
+  dependencies:
+    chalk "^0.4.0"
+    gzip-size "^0.1.0"
+    pretty-bytes "^0.1.0"
+
+memoizee@~0.2.5:
+  version "0.2.6"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.2.6.tgz#bb45a7ad02530082f1612671dab35219cd2e0741"
+  dependencies:
+    es5-ext "~0.9.2"
+    event-emitter "~0.2.2"
+    next-tick "0.1.x"
+
+memoizee@~0.3.8:
+  version "0.3.10"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.3.10.tgz#4eca0d8aed39ec9d017f4c5c2f2f6432f42e5c8f"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.11"
+    es6-weak-map "~0.1.4"
+    event-emitter "~0.3.4"
+    lru-queue "0.1"
+    next-tick "~0.2.2"
+    timers-ext "0.1"
+
+mime-db@~1.12.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.12.0.tgz#3d0c63180f458eb10d325aaa37d7c58ae312e9d7"
+
+mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime-types@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-1.0.2.tgz#995ae1392ab8affcbfcb2641dd054e943c0d5dce"
+
+mime-types@~2.0.3:
+  version "2.0.14"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.0.14.tgz#310e159db23e077f8bb22b748dabfa4957140aa6"
+  dependencies:
+    mime-db "~1.12.0"
+
+mime@~1.2.11, mime@~1.2.9:
+  version "1.2.11"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10"
+
+minimatch@0.3, minimatch@0.x.x, minimatch@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.3.0.tgz#275d8edaac4f1bb3326472089e7949c8394699dd"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimatch@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimatch@~0.2.11, minimatch@~0.2.12:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e"
+
+mkdirp@0.5, mkdirp@^0.5.0, mkdirp@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@~0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkpath@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/mkpath/-/mkpath-0.1.0.tgz#7554a6f8d871834cc97b5462b122c4c124d6de91"
+
+mocha@^1.21.5:
+  version "1.21.5"
+  resolved "https://registry.yarnpkg.com/mocha/-/mocha-1.21.5.tgz#7c58b09174df976e434a23b1e8d639873fc529e9"
+  dependencies:
+    commander "2.3.0"
+    debug "2.0.0"
+    diff "1.0.8"
+    escape-string-regexp "1.0.2"
+    glob "3.2.3"
+    growl "1.8.1"
+    jade "0.26.3"
+    mkdirp "0.5.0"
+
+mout@~0.9.0, mout@~0.9.1:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.9.1.tgz#84f0f3fd6acc7317f63de2affdcc0cee009b0477"
+
+ms@0.6.2, ms@~0.6.1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.6.2.tgz#d89c2124c6fdc1353d65a8b77bf1aac4b193708c"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mute-stream@0.0.4, mute-stream@~0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+nested-error-stacks@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/nested-error-stacks/-/nested-error-stacks-1.0.2.tgz#19f619591519f096769a5ba9a86e6eeec823c3cf"
+  dependencies:
+    inherits "~2.0.1"
+
+next-tick@0.1.x:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.1.0.tgz#1912cce8eb9b697d640fbba94f8f00dec3b94259"
+
+next-tick@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c"
+
+next-tick@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.2.2.tgz#75da4a927ee5887e39065880065b7336413b310d"
+
+node-uuid@~1.4.0, node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+nopt@~1.0.10:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee"
+  dependencies:
+    abbrev "1"
+
+nopt@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-2.0.0.tgz#ca7416f20a5e3f9c3b86180f96295fa3d0b52e0d"
+  dependencies:
+    abbrev "1"
+
+nopt@~2.2.0:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-2.2.1.tgz#2aa09b7d1768487b3b89a9c5aa52335bff0baea7"
+  dependencies:
+    abbrev "1"
+
+nopt@~3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+noptify@latest:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/noptify/-/noptify-0.0.3.tgz#58f654a73d9753df0c51d9686dc92104a67f4bbb"
+  dependencies:
+    nopt "~2.0.0"
+
+oauth-sign@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.3.0.tgz#cb540f93bb2b22a7d5941691a288d60e8ea9386e"
+
+oauth-sign@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.5.0.tgz#d767f5169325620eab2e087ef0c472e773db6461"
+
+oauth-sign@~0.8.0:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa"
+
+object-assign@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-3.0.0.tgz#9bedd5ca0897949bca47e7ff408062d549f587f2"
+
+object-assign@~0.1.1, object-assign@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-0.1.2.tgz#036992f073aff7b2db83d06b3fb3155a5ccac37f"
+
+once@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.0:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+open@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/open/-/open-0.0.5.tgz#42c3e18ec95466b6bf0dc42f3a2945c3f0cad8fc"
+
+opn@~0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/opn/-/opn-0.1.2.tgz#c527832cfd964d52096b524d0035ecaece51db4f"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+optimist@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-tmpdir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.0.3.tgz#cd6ad8ddb290915ad9e22765576025d411f29cb6"
+
+osenv@^0.1.0, osenv@~0.1.0:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+p-throttler@~0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/p-throttler/-/p-throttler-0.0.1.tgz#c341e3589ec843852a035e6f88e6c1e96150029b"
+  dependencies:
+    q "~0.9.2"
+
+package-json@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/package-json/-/package-json-1.2.0.tgz#c8ecac094227cdf76a316874ed05e27cc939a0e0"
+  dependencies:
+    got "^3.2.0"
+    registry-url "^3.0.0"
+
+"package@>= 1.0.0 < 1.2.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/package/-/package-1.0.1.tgz#d25a1f99e2506dcb27d6704b83dca8a312e4edcc"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+pause@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/pause/-/pause-0.0.1.tgz#1d408b3fdb76923b9543d96fb4c9dfd535d9cb5d"
+
+pend@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50"
+
+phantomjs@^1.9.15:
+  version "1.9.20"
+  resolved "https://registry.yarnpkg.com/phantomjs/-/phantomjs-1.9.20.tgz#4424aca20e14d255c0b0889af6f6b8973da10e0d"
+  dependencies:
+    extract-zip "~1.5.0"
+    fs-extra "~0.26.4"
+    hasha "^2.2.0"
+    kew "~0.7.0"
+    progress "~1.1.8"
+    request "~2.67.0"
+    request-progress "~2.0.1"
+    which "~1.2.2"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+prepend-http@^1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc"
+
+pretty-bytes@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-0.1.2.tgz#cd90294d58a1ca4e8a5d0fb9c8225998881acf00"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+progress@~1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/progress/-/progress-1.1.8.tgz#e260c78f6161cdd9b0e56cc3e0a85de17c7a57be"
+
+promptly@~0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/promptly/-/promptly-0.2.1.tgz#6444e7ca4dbd9899e7eeb5ec3922827ebdc22b3b"
+  dependencies:
+    read "~1.0.4"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@~0.8.10:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/q/-/q-0.8.12.tgz#9162a91e11819c4bcda7da15cf5fefaad0778823"
+
+q@~0.9.2:
+  version "0.9.7"
+  resolved "https://registry.yarnpkg.com/q/-/q-0.9.7.tgz#4de2e6cb3b29088c9e4cbc03bf9d42fb96ce2f75"
+
+q@~1.0.0, q@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.0.1.tgz#11872aeedee89268110b10a718448ffb10112a14"
+
+qs@0.6.5:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.6.5.tgz#294b268e4b0d4250f6dde19b3b8b34935dff14ef"
+
+qs@~0.5.2:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.5.6.tgz#31b1ad058567651c526921506b9a8793911a0384"
+
+qs@~0.6.0:
+  version "0.6.6"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.6.6.tgz#6e015098ff51968b8a3c819001d5f2c89bc4b107"
+
+qs@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-1.0.2.tgz#50a93e2b5af6691c31bcea5dae78ee6ea1903768"
+
+qs@~2.3.1:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-2.3.3.tgz#e9e85adbe75da0bbe4c8e0476a086290f863b404"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+range-parser@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-0.0.4.tgz#c0427ffef51c10acba0782a46c9602e744ff620b"
+
+rc@^1.0.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-all-stream@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/read-all-stream/-/read-all-stream-3.1.0.tgz#35c3e177f2078ef789ee4bfafa4373074eaef4fa"
+  dependencies:
+    pinkie-promise "^2.0.0"
+    readable-stream "^2.0.0"
+
+read@~1.0.4:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+readable-stream@^2.0.0, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.26:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~1.1.8:
+  version "1.1.14"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~2.0.0, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readline2@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+redeyed@~0.4.0:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.4.4.tgz#37e990a6f2b21b2a11c2e6a48fd4135698cba97f"
+  dependencies:
+    esprima "~1.0.4"
+
+registry-url@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942"
+  dependencies:
+    rc "^1.0.1"
+
+request-progress@~0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-0.3.1.tgz#0721c105d8a96ac6b2ce8b2c89ae2d5ecfcf6b3a"
+  dependencies:
+    throttleit "~0.0.2"
+
+request-progress@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-2.0.1.tgz#5d36bb57961c673aa5b788dbc8141fdf23b44e08"
+  dependencies:
+    throttleit "^1.0.0"
+
+request-replay@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/request-replay/-/request-replay-0.2.0.tgz#9b693a5d118b39f5c596ead5ed91a26444057f60"
+  dependencies:
+    retry "~0.6.0"
+
+request@~2.27.0:
+  version "2.27.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.27.0.tgz#dfb1a224dd3a5a9bade4337012503d710e538668"
+  dependencies:
+    aws-sign "~0.3.0"
+    cookie-jar "~0.3.0"
+    forever-agent "~0.5.0"
+    form-data "~0.1.0"
+    hawk "~1.0.0"
+    http-signature "~0.10.0"
+    json-stringify-safe "~5.0.0"
+    mime "~1.2.9"
+    node-uuid "~1.4.0"
+    oauth-sign "~0.3.0"
+    qs "~0.6.0"
+    tunnel-agent "~0.3.0"
+
+request@~2.36.0:
+  version "2.36.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.36.0.tgz#28c6c04262c7b9ffdd21b9255374517ee6d943f5"
+  dependencies:
+    forever-agent "~0.5.0"
+    json-stringify-safe "~5.0.0"
+    mime "~1.2.9"
+    node-uuid "~1.4.0"
+    qs "~0.6.0"
+  optionalDependencies:
+    aws-sign2 "~0.5.0"
+    form-data "~0.1.0"
+    hawk "~1.0.0"
+    http-signature "~0.10.0"
+    oauth-sign "~0.3.0"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@~2.40.0:
+  version "2.40.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.40.0.tgz#4dd670f696f1e6e842e66b4b5e839301ab9beb67"
+  dependencies:
+    forever-agent "~0.5.0"
+    json-stringify-safe "~5.0.0"
+    mime-types "~1.0.1"
+    node-uuid "~1.4.0"
+    qs "~1.0.0"
+  optionalDependencies:
+    aws-sign2 "~0.5.0"
+    form-data "~0.1.0"
+    hawk "1.1.1"
+    http-signature "~0.10.0"
+    oauth-sign "~0.3.0"
+    stringstream "~0.0.4"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@~2.51.0:
+  version "2.51.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.51.0.tgz#35d00bbecc012e55f907b1bd9e0dbd577bfef26e"
+  dependencies:
+    aws-sign2 "~0.5.0"
+    bl "~0.9.0"
+    caseless "~0.8.0"
+    combined-stream "~0.0.5"
+    forever-agent "~0.5.0"
+    form-data "~0.2.0"
+    hawk "1.1.1"
+    http-signature "~0.10.0"
+    json-stringify-safe "~5.0.0"
+    mime-types "~1.0.1"
+    node-uuid "~1.4.0"
+    oauth-sign "~0.5.0"
+    qs "~2.3.1"
+    stringstream "~0.0.4"
+    tough-cookie ">=0.12.0"
+    tunnel-agent "~0.4.0"
+
+request@~2.67.0:
+  version "2.67.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.67.0.tgz#8af74780e2bf11ea0ae9aa965c11f11afd272742"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+resolve@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-0.3.1.tgz#34c63447c664c70598d1c9b126fc43b2a24310a4"
+
+retry@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.6.1.tgz#fdc90eed943fde11b893554b8cc63d0e899ba918"
+
+rimraf@2, rimraf@^2.2.8, rimraf@~2.2.0, rimraf@~2.2.1, rimraf@~2.2.8:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+sax@~0.5.0:
+  version "0.5.8"
+  resolved "https://registry.yarnpkg.com/sax/-/sax-0.5.8.tgz#d472db228eb331c2506b0e8c15524adb939d12c1"
+
+semver-diff@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36"
+  dependencies:
+    semver "^5.0.3"
+
+semver@^4.3.0:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@^5.0.3:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-2.3.2.tgz#b9848f25d6cf36333073ec9ef8856d42f1233e52"
+
+send@0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.1.1.tgz#0bcfcbd03def6e2d8612e1abf8f4895b450c60c8"
+  dependencies:
+    debug "*"
+    fresh "0.1.0"
+    mime "~1.2.9"
+    range-parser "0.0.4"
+
+shell-quote@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.4.3.tgz#952c44e0b1ed9013ef53958179cc643e8777466b"
+  dependencies:
+    array-filter "~0.0.0"
+    array-map "~0.0.0"
+    array-reduce "~0.0.0"
+    jsonify "~0.0.0"
+
+shelljs@0.1.x:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.1.4.tgz#dfbbe78d56c3c0168d2fb79e10ecd1dbcb07ec0e"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+sntp@0.2.x:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-0.2.4.tgz#fb885f18b0f3aad189f824862536bceeec750900"
+  dependencies:
+    hoek "0.9.x"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+source-map@0.1.34:
+  version "0.1.34"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.1.x, source-map@~0.1.22, source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stream-shift@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.0.tgz#d5c752825e5367e786f78e18e445ea223a155952"
+
+string-length@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/string-length/-/string-length-1.0.1.tgz#56970fb1c38558e9e70b728bf3de269ac45adfac"
+  dependencies:
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringify-object@~0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-0.2.1.tgz#b58be50b3ff5f371038c545d4332656bfded5620"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+svgo@~0.3.1:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.3.7.tgz#18e6bbfd9f70b84b5fe88e6656d84402c71ae7a0"
+  dependencies:
+    coa "~0.3.7"
+    colors "~0.6.0"
+    js-yaml ""
+    sax "~0.5.0"
+    whet.extend ""
+
+tape@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/tape/-/tape-0.2.2.tgz#64ccfa4b7ecf4a0060007e61716d424781671637"
+  dependencies:
+    deep-equal "~0.0.0"
+    defined "~0.0.0"
+    jsonify "~0.0.0"
+
+tar@~0.1.17:
+  version "0.1.20"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-0.1.20.tgz#42940bae5b5f22c74483699126f9f3f27449cb13"
+  dependencies:
+    block-stream "*"
+    fstream "~0.1.28"
+    inherits "2"
+
+temporary@^0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/temporary/-/temporary-0.0.8.tgz#a18a981d28ba8ca36027fb3c30538c3ecb740ac0"
+  dependencies:
+    package ">= 1.0.0 < 1.2.0"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+throttleit@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-1.0.0.tgz#9e785836daf46743145a5984b6268d828528ac6c"
+
+throttleit@~0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-0.0.2.tgz#cfedf88e60c00dd9697b61fdd2a8343a9b680eaf"
+
+through@~2.3.4:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+time-grunt@~0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/time-grunt/-/time-grunt-0.1.2.tgz#2f6393a9af2ff832ed4163e080779d1dc326a2c1"
+  dependencies:
+    chalk "~0.3.0"
+    ms "~0.6.1"
+    text-table "~0.2.0"
+
+timed-out@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-2.0.0.tgz#f38b0ae81d3747d628001f41dafc652ace671c0a"
+
+timers-ext@0.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.2.tgz#61cc47a76c1abd3195f14527f978d58ae94c5204"
+  dependencies:
+    es5-ext "~0.10.14"
+    next-tick "1"
+
+tiny-lr@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.0.4.tgz#80618547f63f697d05cb40c4c2c4b083521aefb6"
+  dependencies:
+    debug "~0.7.0"
+    faye-websocket "~0.4.3"
+    noptify latest
+    qs "~0.5.2"
+
+tmp@0.0.23:
+  version "0.0.23"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.23.tgz#de874aa5e974a85f0a32cdfdbd74663cb3bd9c74"
+
+touch@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/touch/-/touch-0.0.2.tgz#a65a777795e5cbbe1299499bdc42281ffb21b5f4"
+  dependencies:
+    nopt "~1.0.10"
+
+tough-cookie@>=0.12.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+"traverse@>=0.3.0 <0.4":
+  version "0.3.9"
+  resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.3.9.tgz#717b8f220cc0bb7b44e40514c22b2e8bbc70d8b9"
+
+tunnel-agent@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.3.0.tgz#ad681b68f5321ad2827c4cfb1b7d5df2cfe942ee"
+
+tunnel-agent@~0.4.0, tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+typedarray@^0.0.6, typedarray@~0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-js@~2.4.0:
+  version "2.4.24"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.4.24.tgz#fad5755c1e1577658bb06ff9ab6e548c95bebd6e"
+  dependencies:
+    async "~0.2.6"
+    source-map "0.1.34"
+    uglify-to-browserify "~1.0.0"
+    yargs "~3.5.4"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+underscore.string@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.2.1.tgz#d7c0fa2af5d5a1a67f4253daee98132e733f0f19"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore.string@~2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.4.0.tgz#8cdd8fbac4e2d2ea1e7e2e8097c42f442280f85b"
+
+underscore@1.4.x:
+  version "1.4.4"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.4.4.tgz#61a6a32010622afa07963bf325203cf12239d604"
+
+underscore@~1.7.0:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.7.0.tgz#6bbaf0877500d36be34ecaa584e0db9fef035209"
+
+update-notifier@~0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-0.2.2.tgz#e69b3a784b4e686a2acd98f5e66944591996e187"
+  dependencies:
+    chalk "^0.5.1"
+    configstore "^0.3.1"
+    is-npm "^1.0.0"
+    latest-version "^1.0.0"
+    semver-diff "^2.0.0"
+    string-length "^1.0.0"
+
+user-home@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@~1.4.1:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-1.4.2.tgz#453019f686966a6df83cdc5244e7c990ecc332fc"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+whet.extend@:
+  version "0.9.9"
+  resolved "https://registry.yarnpkg.com/whet.extend/-/whet.extend-0.9.9.tgz#f877d5bf648c97e5aa542fadc16d6a259b9c11a1"
+
+which@~1.0.5:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.0.9.tgz#460c1da0f810103d0321a9b633af9e575e64486f"
+
+which@~1.2.2:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrappy@1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+xdg-basedir@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-1.0.1.tgz#14ff8f63a4fdbcb05d5b6eea22b36f3033b9f04e"
+  dependencies:
+    user-home "^1.0.0"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+yargs@~3.5.4:
+  version "3.5.4"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.5.4.tgz#d8aff8f665e94c34bd259bdebd1bfaf0ddd35361"
+  dependencies:
+    camelcase "^1.0.2"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+    wordwrap "0.0.2"
+
+yauzl@2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.4.1.tgz#9528f442dab1b2284e58b4379bb194e22e0c4005"
+  dependencies:
+    fd-slicer "~1.0.1"
+
+zlib-browserify@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/zlib-browserify/-/zlib-browserify-0.0.1.tgz#4fa6a45d00dbc15f318a4afa1d9afc0258e176cc"
+
+zlib-browserify@^0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/zlib-browserify/-/zlib-browserify-0.0.3.tgz#240ccdbfd0203fa842b130deefb1414122c8cc50"
+  dependencies:
+    tape "~0.2.2"
diff --git a/contrib/views/pig/pom.xml b/contrib/views/pig/pom.xml
index d89d47e..5239cc2 100644
--- a/contrib/views/pig/pom.xml
+++ b/contrib/views/pig/pom.xml
@@ -178,25 +178,30 @@
       <plugin>
         <groupId>com.github.eirslett</groupId>
         <artifactId>frontend-maven-plugin</artifactId>
-        <version>1.3</version>
+        <version>1.4</version>
         <configuration>
           <nodeVersion>v4.5.0</nodeVersion>
-          <npmVersion>2.15.0</npmVersion>
+          <yarnVersion>v0.23.2</yarnVersion>
           <workingDirectory>${ui.directory}</workingDirectory>
           <npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+          <!-- Setting the npm_config_tmp environment variable is a workaround for
+               https://github.com/Medium/phantomjs/issues/673 -->
+          <environmentVariables>
+            <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+          </environmentVariables>
         </configuration>
         <executions>
           <execution>
-            <id>install node and npm</id>
+            <id>install node and yarn</id>
             <phase>initialize</phase>
             <goals>
-              <goal>install-node-and-npm</goal>
+              <goal>install-node-and-yarn</goal>
             </goals>
           </execution>
           <execution>
-            <id>npm install</id>
+            <id>yarn install</id>
             <goals>
-              <goal>npm</goal>
+              <goal>yarn</goal>
             </goals>
             <phase>generate-resources</phase>
             <configuration>
@@ -204,6 +209,6 @@
                    you need to run some other npm command, you can remove this whole <configuration>
                    section. -->
-              <arguments>install --unsafe-perm</arguments>
+              <arguments>install --unsafe-perm --ignore-engines</arguments>
             </configuration>
           </execution>
         </executions>
@@ -213,7 +218,7 @@
         <groupId>org.codehaus.mojo</groupId>
         <version>1.2.1</version>
         <executions>
-          <execution>
+<!--           <execution>
             <id>node gyp executable</id>
             <phase>initialize</phase>
             <goals>
@@ -228,7 +233,7 @@
                 <argument>${ui.directory}/node/node_modules/npm/bin/node-gyp-bin/node-gyp</argument>
               </arguments>
             </configuration>
-          </execution>
+          </execution> -->
           <execution>
             <id>Bower install</id>
             <phase>generate-resources</phase>
diff --git a/contrib/views/pig/src/main/resources/ui/pig-web/yarn.lock b/contrib/views/pig/src/main/resources/ui/pig-web/yarn.lock
new file mode 100644
index 0000000..5b469c4
--- /dev/null
+++ b/contrib/views/pig/src/main/resources/ui/pig-web/yarn.lock
@@ -0,0 +1,2376 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abab@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.3.tgz#b81de5f7274ec4e756d797cd834f303642724e5d"
+
+abbrev@1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+accepts@1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn-globals@^3.1.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-3.1.0.tgz#fd8270f71fbb4996b004fa880ee5d46573a731bf"
+  dependencies:
+    acorn "^4.0.4"
+
+acorn@^4.0.4:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.2:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-color@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-color/-/ansi-color-0.2.1.tgz#3e75c037475217544ed763a8db5709fa9ae5bf9a"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+anymatch@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-0.2.0.tgz#e919a97cd43373e6e645bbb7b644e2daebeba405"
+  dependencies:
+    minimatch "~0.2.12"
+
+anysort@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/anysort/-/anysort-0.2.0.tgz#57d7035a84960566763568fc5e2d9ab4b047fe60"
+  dependencies:
+    anymatch "~0.2.0"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-slice@^0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-0.2.3.tgz#dd3cfb80ed7973a75117cdac69b0b99ec86186f5"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+async-each@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d"
+
+async-each@~0.1.1, async-each@~0.1.2, async-each@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/async-each/-/async-each-0.1.6.tgz#b67e99edcddf96541e44af56290cd7d5c6e70439"
+
+async-waterfall@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/async-waterfall/-/async-waterfall-0.1.5.tgz#398bd48b0eac5d40ffbe400fe9e37a53ba966dae"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.6:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6"
+
+batch@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/batch/-/batch-0.2.1.tgz#4463997bb4d5fd1c7a011548813e52aa189c2c79"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+binary-extensions@^1.0.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.8.0.tgz#48ec8d16df4377eae5fa5884682480af4d95c774"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^3.3.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@^1.16.1:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+"bower@>= 1.2.0":
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.8.0.tgz#55dbebef0ad9155382d9e9d3e497c1372345b44a"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-0.1.5.tgz#c085711085291d8b75fdd74eab0f8597280711e6"
+  dependencies:
+    expand-range "^0.1.0"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+brunch@1.7.17:
+  version "1.7.17"
+  resolved "https://registry.yarnpkg.com/brunch/-/brunch-1.7.17.tgz#070940f1c99e0a7e0f1ea25f07de95d75825ced5"
+  dependencies:
+    anysort "~0.2.0"
+    async-each "~0.1.2"
+    async-waterfall "~0.1.2"
+    chokidar "~0.9.0"
+    coffee-script "~1.8.0"
+    commander "~2.0.0"
+    commonjs-require-definition "~0.1.0"
+    debug "~0.7.2"
+    init-skeleton "~0.2.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    pushserve "~0.1.6"
+    read-components "~0.6.0"
+    source-map "~0.1.35"
+
+buffer-crc32@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.1.tgz#be3e5382fc02b6d6324956ac1af98aa98b08534c"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+bytes@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-0.2.0.tgz#aad33ec14e3dc2ca74e8e7d451f9ba053ad4f7a0"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase@^1.0.2:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+chalk@^1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chokidar@^1.4.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.6.1.tgz#2f4447ab5e96e50fb3d789fd90d4c72e0e4c70c2"
+  dependencies:
+    anymatch "^1.3.0"
+    async-each "^1.0.0"
+    glob-parent "^2.0.0"
+    inherits "^2.0.1"
+    is-binary-path "^1.0.0"
+    is-glob "^2.0.0"
+    path-is-absolute "^1.0.0"
+    readdirp "^2.0.0"
+  optionalDependencies:
+    fsevents "^1.0.0"
+
+chokidar@~0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-0.9.0.tgz#c1ae41561dbdb89dd5fac615453d20b48a946c2f"
+  optionalDependencies:
+    fsevents "0.3.0"
+    recursive-readdir "0.0.2"
+
+clean-css@2.0.x:
+  version "2.0.8"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.0.8.tgz#e937cdfdcc5781a00817aec4079e85b3ec157a20"
+  dependencies:
+    commander "2.0.x"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+coffee-script@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.4.0.tgz#5e3bc8aac26c01a8e27bf107722c5655f5ad7d36"
+
+coffee-script@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/coffee-script/-/coffee-script-1.8.0.tgz#9c9f1d2b4a52a000ded15b659791703648263c1d"
+  dependencies:
+    mkdirp "~0.3.5"
+
+colors@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63"
+
+combine-lists@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/combine-lists/-/combine-lists-1.0.1.tgz#458c07e09e0d900fc28b70a3fec2dacd1d2cb7f6"
+  dependencies:
+    lodash "^4.5.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@1.2.0, commander@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-1.2.0.tgz#fd5713bfa153c7d6cc599378a5ab4c45c535029e"
+  dependencies:
+    keypress "0.1.x"
+
+commander@2.0.x, commander@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.0.0.tgz#d1b86f901f8b64bd941bdeadaf924530393be928"
+
+commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commonjs-require-definition@~0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/commonjs-require-definition/-/commonjs-require-definition-0.1.2.tgz#93720e42b3383a00e4097f6a4a979f10f376dc2d"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-builder@~0.10.0:
+  version "0.10.1"
+  resolved "https://registry.yarnpkg.com/component-builder/-/component-builder-0.10.1.tgz#d29c7ab70241a678e3f8dbea4d1759c68b6f8f9b"
+  dependencies:
+    batch "0.2.1"
+    component-require "0.3.1"
+    cp "~0.1.0"
+    debug "*"
+    mkdirp "0.3.4"
+    string-to-js "0.0.1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+component-require@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/component-require/-/component-require-0.3.1.tgz#50a00e2e2cb0fe273ab4268fe20ae4804f35fe6d"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+connect-slashes@~0.0.9:
+  version "0.0.11"
+  resolved "https://registry.yarnpkg.com/connect-slashes/-/connect-slashes-0.0.11.tgz#4b44efae7599cc03ee20b24e9287272f41d62258"
+
+connect@2.8.8:
+  version "2.8.8"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-2.8.8.tgz#b9abf8caf0bd9773cb3dea29344119872582446d"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    bytes "0.2.0"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    formidable "1.0.14"
+    fresh "0.2.0"
+    methods "0.0.1"
+    pause "0.0.1"
+    qs "0.6.5"
+    send "0.1.4"
+    uid2 "0.0.2"
+
+connect@^3.6.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+content-type-parser@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/content-type-parser/-/content-type-parser-1.0.1.tgz#c3e56988c53c65127fb46d4032a3a900246fdc94"
+
+content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+cookie-signature@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.1.tgz#44e072148af01e6e8e24afbf12690d68ae698ecb"
+
+cookie@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.1.0.tgz#90eb469ddce905c866de687efc43131d8801f9d0"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+core-js@^2.2.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cp@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/cp/-/cp-0.1.1.tgz#3946a76c1a53ffe0e68593f341c124b336c1f06d"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+"css-brunch@>= 1.0 < 1.8":
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/css-brunch/-/css-brunch-1.7.0.tgz#258c1b038a970840af0c6a8e5582f614050294da"
+
+cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0":
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.2.tgz#b8036170c79f07a90ff2f16e22284027a243848b"
+
+"cssstyle@>= 0.2.37 < 0.3.0":
+  version "0.2.37"
+  resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54"
+  dependencies:
+    cssom "0.3.x"
+
+custom-event@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/custom-event/-/custom-event-1.0.1.tgz#5d02a46850adf1b4a317946a3928fccb5bfd0425"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-utils@~1.2.17:
+  version "1.2.21"
+  resolved "https://registry.yarnpkg.com/date-utils/-/date-utils-1.2.21.tgz#61fb16cdc1274b3c9acaaffe9fc69df8720a2b64"
+
+debug@*, debug@2.6.3, debug@^2.2.0:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@0.7.4, debug@~0.7.2:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+deep-extend@~0.4.0:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.1.tgz#efe4113d08085f4e6f9687759810f807469e2253"
+
+deep-is@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+di@^0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c"
+
+dom-serialize@^2.2.0:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b"
+  dependencies:
+    custom-event "~1.0.0"
+    ent "~2.2.0"
+    extend "^3.0.0"
+    void-elements "^2.0.0"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+ember-precompile-brunch@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/ember-precompile-brunch/-/ember-precompile-brunch-0.1.2.tgz#54e73d5054a35afde79aa1c1b454f6ed0e37611c"
+  dependencies:
+    coffee-script "1.4.0"
+    jsdom "^9.0.0"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.3.tgz#1798ed93451246453d4c6f635d7a201fe940d5ab"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.2"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.2:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a"
+  dependencies:
+    after "0.8.2"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.7"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.3.tgz#8de7f97895d20d39b85f88eeee777b2bd42b13d4"
+  dependencies:
+    accepts "1.3.3"
+    base64id "1.0.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.2"
+    ws "1.1.2"
+
+ent@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/ent/-/ent-2.2.0.tgz#e964219325a21d05f44466a2f686ed6ce5f5dd1d"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.2:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+escodegen@^1.6.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018"
+  dependencies:
+    esprima "^2.7.1"
+    estraverse "^1.9.1"
+    esutils "^2.0.2"
+    optionator "^0.8.1"
+  optionalDependencies:
+    source-map "~0.2.0"
+
+esprima@^2.7.1:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-1.0.4.tgz#9f557e08fc3b4d26ece9dd34f8fbf476b62585ad"
+
+estraverse@^1.9.1:
+  version "1.9.3"
+  resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44"
+
+esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+expand-braces@^0.1.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea"
+  dependencies:
+    array-slice "^0.2.3"
+    array-unique "^0.2.1"
+    braces "^0.1.2"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-0.1.1.tgz#4cb8eda0993ca56fa4f41fc42f3cbb4ccadff044"
+  dependencies:
+    is-number "^0.1.1"
+    repeat-string "^0.2.2"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@~3.3.0:
+  version "3.3.8"
+  resolved "https://registry.yarnpkg.com/express/-/express-3.3.8.tgz#8e98ac30d81f4c95b85d71d2af6cf84f62ef19bd"
+  dependencies:
+    buffer-crc32 "0.2.1"
+    commander "1.2.0"
+    connect "2.8.8"
+    cookie "0.1.0"
+    cookie-signature "1.0.1"
+    debug "*"
+    fresh "0.2.0"
+    methods "0.0.1"
+    mkdirp "0.3.5"
+    range-parser "0.0.4"
+    send "0.1.4"
+
+extend@^3.0.0, extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-levenshtein@~2.0.4:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+formidable@1.0.14:
+  version "1.0.14"
+  resolved "https://registry.yarnpkg.com/formidable/-/formidable-1.0.14.tgz#2b3f4c411cbb5fdd695c44843e2a23514a43231a"
+
+fresh@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.2.0.tgz#bfd9402cf3df12c4a4c310c79f99a3dde13d34a7"
+
+fs-extra@~0.26.4:
+  version "0.26.7"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.7.tgz#9ae1fdd94897798edab76d0918cf42d0c3184fa9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fsevents@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-0.3.0.tgz#90723a3d0bbab877b62d0a78db633ef2688d8a81"
+  dependencies:
+    nan "~1.2.0"
+
+fsevents@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.1.1.tgz#f19fd28f43eeaf761680e519a203c4d0b3d31aff"
+  dependencies:
+    nan "^2.3.0"
+    node-pre-gyp "^0.6.29"
+
+fstream-ignore@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@~1.8.1:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.8.1.tgz#4b2dec8d907e93db336624dcec0183502f8c9428"
+
+handlebars@~1.0.9:
+  version "1.0.12"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-1.0.12.tgz#18c6d3440c35e91b19b3ff582b9151ab4985d4fc"
+  dependencies:
+    optimist "~0.3"
+    uglify-js "~2.3"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hasha@^2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+html-encoding-sniffer@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.1.tgz#79bf7a785ea495fe66165e734153f363ff5437da"
+  dependencies:
+    whatwg-encoding "^1.0.1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iconv-lite@0.4.15:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.15.tgz#fe265a218ac6a57cfe854927e9d04c19825eddeb"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.2.7.tgz#59db4505310a746677182ed46e155e003bfb3591"
+
+inflight@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@~1.3.0:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-skeleton@~0.2.0:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/init-skeleton/-/init-skeleton-0.2.4.tgz#82655737a62d3b3b0153371c5847209132171863"
+  dependencies:
+    commander "~2.0.0"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+    ncp "~0.4.2"
+    rimraf "~2.2.1"
+
+is-binary-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898"
+  dependencies:
+    binary-extensions "^1.0.0"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-stream@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+"javascript-brunch@>= 1.0 < 1.8":
+  version "1.7.1"
+  resolved "https://registry.yarnpkg.com/javascript-brunch/-/javascript-brunch-1.7.1.tgz#80810ccd8ce5a22c7525a46c0bb94b4bb9d8185d"
+  dependencies:
+    esprima "~1.0.4"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsdom@^9.0.0:
+  version "9.12.0"
+  resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-9.12.0.tgz#e8c546fffcb06c00d4833ca84410fed7f8a097d4"
+  dependencies:
+    abab "^1.0.3"
+    acorn "^4.0.4"
+    acorn-globals "^3.1.0"
+    array-equal "^1.0.0"
+    content-type-parser "^1.0.1"
+    cssom ">= 0.3.2 < 0.4.0"
+    cssstyle ">= 0.2.37 < 0.3.0"
+    escodegen "^1.6.1"
+    html-encoding-sniffer "^1.0.1"
+    nwmatcher ">= 1.3.9 < 2.0.0"
+    parse5 "^1.5.1"
+    request "^2.79.0"
+    sax "^1.2.1"
+    symbol-tree "^3.2.1"
+    tough-cookie "^2.3.2"
+    webidl-conversions "^4.0.0"
+    whatwg-encoding "^1.0.1"
+    whatwg-url "^4.3.0"
+    xml-name-validator "^2.0.1"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+karma-phantomjs-launcher@~0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/karma-phantomjs-launcher/-/karma-phantomjs-launcher-0.1.4.tgz#4ef96e4322ff63ae5d918e51c25b213723238f30"
+  dependencies:
+    phantomjs "~1.9"
+
+karma-qunit@*:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/karma-qunit/-/karma-qunit-1.2.1.tgz#88252afd2127bc03b0cc31978ed6882b139f470a"
+
+karma@*:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/karma/-/karma-1.6.0.tgz#0e871d4527d5eac56c41d181f03c5c0a7e6dbf3e"
+  dependencies:
+    bluebird "^3.3.0"
+    body-parser "^1.16.1"
+    chokidar "^1.4.1"
+    colors "^1.1.0"
+    combine-lists "^1.0.0"
+    connect "^3.6.0"
+    core-js "^2.2.0"
+    di "^0.0.1"
+    dom-serialize "^2.2.0"
+    expand-braces "^0.1.1"
+    glob "^7.1.1"
+    graceful-fs "^4.1.2"
+    http-proxy "^1.13.0"
+    isbinaryfile "^3.0.0"
+    lodash "^3.8.0"
+    log4js "^0.6.31"
+    mime "^1.3.4"
+    minimatch "^3.0.2"
+    optimist "^0.6.1"
+    qjobs "^1.1.4"
+    range-parser "^1.2.0"
+    rimraf "^2.6.0"
+    safe-buffer "^5.0.1"
+    socket.io "1.7.3"
+    source-map "^0.5.3"
+    tmp "0.0.31"
+    useragent "^2.1.12"
+
+kew@~0.7.0:
+  version "0.7.0"
+  resolved "https://registry.yarnpkg.com/kew/-/kew-0.7.0.tgz#79d93d2d33363d6fdd2970b335d9141ad591d79b"
+
+keypress@0.1.x:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/keypress/-/keypress-0.1.0.tgz#4a3188d4291b66b4f65edb99f806aa9ae293592a"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+less-brunch@~1.7.1:
+  version "1.7.2"
+  resolved "https://registry.yarnpkg.com/less-brunch/-/less-brunch-1.7.2.tgz#6a2fdd7cb16d093f8ddcaaff1ace91fb96b328ee"
+  dependencies:
+    less "~1.6.3"
+    progeny "~0.1.1"
+
+less@~1.6.3:
+  version "1.6.3"
+  resolved "https://registry.yarnpkg.com/less/-/less-1.6.3.tgz#71ce89ec30b774b3567f254c67958f2f2c193bde"
+  optionalDependencies:
+    clean-css "2.0.x"
+    mime "1.2.x"
+    mkdirp "~0.3.5"
+    request ">=2.12.0"
+    source-map "0.1.x"
+
+levn@~0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee"
+  dependencies:
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+
+lodash@^3.8.0:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.14.0, lodash@^4.5.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+log4js@^0.6.31:
+  version "0.6.38"
+  resolved "https://registry.yarnpkg.com/log4js/-/log4js-0.6.38.tgz#2c494116695d6fb25480943d3fc872e662a522fd"
+  dependencies:
+    readable-stream "~1.0.2"
+    semver "~4.3.3"
+
+loggy@~0.2.0:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/loggy/-/loggy-0.2.2.tgz#7edc85706a82d761ce9cef868f1afbad84165427"
+  dependencies:
+    ansi-color "~0.2.1"
+    date-utils "~1.2.17"
+    growl "~1.8.1"
+
+lru-cache@2, lru-cache@2.2.x:
+  version "2.2.4"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+methods@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-0.0.1.tgz#277c90f8bef39709645a8371c51c3b6c648e068c"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime@1.2.x, mime@~1.2.9:
+  version "1.2.11"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.2.11.tgz#58203eed86e3a5ef17aed2b7d9ebd47f0a60dd10"
+
+mime@^1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@^3.0.0, minimatch@^3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@~0.2.12:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.3.4:
+  version "0.3.4"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.4.tgz#f8c81d213b7299a031f193a57d752a17d2f6c7d8"
+
+mkdirp@0.3.5, mkdirp@~0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
+mkdirp@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.0.tgz#1d73076a6df986cd9344e15e71fcc05a4c9abf12"
+  dependencies:
+    minimist "0.0.8"
+
+"mkdirp@>=0.5 0", mkdirp@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+nan@^2.3.0:
+  version "2.6.2"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-2.6.2.tgz#e4ff34e6c95fdfb5aecc08de6596f43605a7db45"
+
+nan@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/nan/-/nan-1.2.0.tgz#9c4d63ce9e4f8e95de2d574e18f7925554a8a8ef"
+
+ncp@~0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/ncp/-/ncp-0.4.2.tgz#abcc6cbd3ec2ed2a729ff6e7c1fa8f01784a8574"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+node-pre-gyp@^0.6.29:
+  version "0.6.34"
+  resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.34.tgz#94ad1c798a11d7fc67381b50d47f8cc18d9799f7"
+  dependencies:
+    mkdirp "^0.5.1"
+    nopt "^4.0.1"
+    npmlog "^4.0.2"
+    rc "^1.1.7"
+    request "^2.81.0"
+    rimraf "^2.6.1"
+    semver "^5.3.0"
+    tar "^2.2.1"
+    tar-pack "^3.4.0"
+
+node-uuid@~1.4.7:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+nopt@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d"
+  dependencies:
+    abbrev "1"
+    osenv "^0.1.4"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npmlog@^4.0.2:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+"nwmatcher@>= 1.3.9 < 2.0.0":
+  version "1.3.9"
+  resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.3.9.tgz#8bab486ff7fa3dfd086656bbe8b17116d3692d2a"
+
+oauth-sign@~0.8.0, oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0, object-assign@^4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+once@^1.3.0, once@^1.3.3:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+optimist@^0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+optimist@~0.3, optimist@~0.3.5:
+  version "0.3.7"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9"
+  dependencies:
+    wordwrap "~0.0.2"
+
+optionator@^0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64"
+  dependencies:
+    deep-is "~0.1.3"
+    fast-levenshtein "~2.0.4"
+    levn "~0.3.0"
+    prelude-ls "~1.1.2"
+    type-check "~0.3.2"
+    wordwrap "~1.0.0"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-tmpdir@^1.0.0, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parse5@^1.5.1:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/parse5/-/parse5-1.5.1.tgz#9b7f3b0de32be78dc2401b17573ccaf0f6f59d94"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+pause@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/pause/-/pause-0.0.1.tgz#1d408b3fdb76923b9543d96fb4c9dfd535d9cb5d"
+
+pend@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/pend/-/pend-1.2.0.tgz#7a57eb550a6783f9115331fcf4663d5c8e007a50"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+phantomjs@^1.9.2, phantomjs@~1.9:
+  version "1.9.20"
+  resolved "https://registry.yarnpkg.com/phantomjs/-/phantomjs-1.9.20.tgz#4424aca20e14d255c0b0889af6f6b8973da10e0d"
+  dependencies:
+    extract-zip "~1.5.0"
+    fs-extra "~0.26.4"
+    hasha "^2.2.0"
+    kew "~0.7.0"
+    progress "~1.1.8"
+    request "~2.67.0"
+    request-progress "~2.0.1"
+    which "~1.2.2"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+prelude-ls@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+progeny@~0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/progeny/-/progeny-0.1.3.tgz#b4681d341c2aedf62b1bfd32be375fa773daa090"
+  dependencies:
+    async-each "~0.1.3"
+
+progress@~1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/progress/-/progress-1.1.8.tgz#e260c78f6161cdd9b0e56cc3e0a85de17c7a57be"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+pushserve@~0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/pushserve/-/pushserve-0.1.6.tgz#a07b173fc2488b71d9af4a5b37411bb3d91d6e27"
+  dependencies:
+    commander "~2.0.0"
+    connect-slashes "~0.0.9"
+    express "~3.3.0"
+
+qjobs@^1.1.4:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/qjobs/-/qjobs-1.1.5.tgz#659de9f2cf8dcc27a1481276f205377272382e73"
+
+qs@0.6.5:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-0.6.5.tgz#294b268e4b0d4250f6dde19b3b8b34935dff14ef"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-0.0.4.tgz#c0427ffef51c10acba0782a46c9602e744ff620b"
+
+range-parser@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.2.0.tgz#994976cf6a5096a41162840492f0bdc5d6e7fb96"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.15"
+    unpipe "1.0.0"
+
+rc@^1.1.7:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.1.tgz#2e03e8e42ee450b8cb3dce65be1bf8974e1dfd95"
+  dependencies:
+    deep-extend "~0.4.0"
+    ini "~1.3.0"
+    minimist "^1.2.0"
+    strip-json-comments "~2.0.1"
+
+read-components@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/read-components/-/read-components-0.6.1.tgz#45752f1c7c7e450742f4085fe6e24fccc5c75720"
+  dependencies:
+    async-each "~0.1.3"
+    component-builder "~0.10.0"
+
+readable-stream@^2.0.2, readable-stream@~2.0.0, readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readable-stream@^2.0.6, readable-stream@^2.1.4:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readdirp@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78"
+  dependencies:
+    graceful-fs "^4.1.2"
+    minimatch "^3.0.2"
+    readable-stream "^2.0.2"
+    set-immediate-shim "^1.0.1"
+
+recursive-readdir@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-0.0.2.tgz#0bc47dc4838e646dccfba0507b5e57ffbff35f7c"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-0.2.2.tgz#c7a8d3236068362059a7e4651fc6884e8b1fb4ae"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+request-progress@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/request-progress/-/request-progress-2.0.1.tgz#5d36bb57961c673aa5b788dbc8141fdf23b44e08"
+  dependencies:
+    throttleit "^1.0.0"
+
+request@>=2.12.0, request@^2.79.0, request@^2.81.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@~2.67.0:
+  version "2.67.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.67.0.tgz#8af74780e2bf11ea0ae9aa965c11f11afd272742"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.7"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.6.0, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.1:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sax@^1.2.1:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.2.tgz#fd8631a23bc7826bef5d871bdb87378c95647828"
+
+scaffolt@^0.4.3:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/scaffolt/-/scaffolt-0.4.3.tgz#eafa6eb31b733b79435cd7e24053de49cd2e145e"
+  dependencies:
+    async-each "~0.1.1"
+    commander "~1.2.0"
+    handlebars "~1.0.9"
+    inflection "~1.2.5"
+    loggy "~0.2.0"
+    mkdirp "~0.3.5"
+
+semver@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@~4.3.3:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+send@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.1.4.tgz#be70d8d1be01de61821af13780b50345a4f71abd"
+  dependencies:
+    debug "*"
+    fresh "0.2.0"
+    mime "~1.2.9"
+    range-parser "0.0.4"
+
+set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+set-immediate-shim@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.3.tgz#b30e86aa10d5ef3546601c09cde4765e381da377"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.3"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.7.3:
+  version "1.7.3"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.3.tgz#b8af9caba00949e568e369f1327ea9be9ea2461b"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.3"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.7.3"
+    socket.io-parser "2.3.1"
+
+source-map@0.1.34:
+  version "0.1.34"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.34.tgz#a7cfe89aec7b1682c3b198d0acfb47d7d090566b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.1.x, source-map@~0.1.35, source-map@~0.1.7:
+  version "0.1.43"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.3:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+source-map@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d"
+  dependencies:
+    amdefine ">=0.0.4"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+"statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-to-js@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/string-to-js/-/string-to-js-0.0.1.tgz#bf153c760636faa30769b804a0195552ba7ad80f"
+
+string-width@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-json-comments@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symbol-tree@^3.2.1:
+  version "3.2.2"
+  resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6"
+
+tar-pack@^3.4.0:
+  version "3.4.0"
+  resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.4.0.tgz#23be2d7f671a8339376cbdb0b8fe3fdebf317984"
+  dependencies:
+    debug "^2.2.0"
+    fstream "^1.0.10"
+    fstream-ignore "^1.0.5"
+    once "^1.3.3"
+    readable-stream "^2.1.4"
+    rimraf "^2.5.1"
+    tar "^2.2.1"
+    uid-number "^0.0.6"
+
+tar@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+throttleit@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/throttleit/-/throttleit-1.0.0.tgz#9e785836daf46743145a5984b6268d828528ac6c"
+
+tmp@0.0.31, tmp@0.0.x:
+  version "0.0.31"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.31.tgz#8f38ab9438e17315e5dbd8b3657e8bfb277ae4a7"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+tough-cookie@^2.3.2, tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+tr46@~0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-check@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72"
+  dependencies:
+    prelude-ls "~1.1.2"
+
+type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@~0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+"uglify-js-brunch@>= 1.0 < 1.8":
+  version "1.7.8"
+  resolved "https://registry.yarnpkg.com/uglify-js-brunch/-/uglify-js-brunch-1.7.8.tgz#b36dffbcd19cfea27248d34797effef1ae9d3a62"
+  dependencies:
+    uglify-js "~2.4.7"
+
+uglify-js@~2.3:
+  version "2.3.6"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a"
+  dependencies:
+    async "~0.2.6"
+    optimist "~0.3.5"
+    source-map "~0.1.7"
+
+uglify-js@~2.4.7:
+  version "2.4.24"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.4.24.tgz#fad5755c1e1577658bb06ff9ab6e548c95bebd6e"
+  dependencies:
+    async "~0.2.6"
+    source-map "0.1.34"
+    uglify-to-browserify "~1.0.0"
+    yargs "~3.5.4"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+uid2@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/uid2/-/uid2-0.0.2.tgz#107fb155c82c1136620797ed4c88cf2b08f6aab8"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+useragent@^2.1.12:
+  version "2.1.13"
+  resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.1.13.tgz#bba43e8aa24d5ceb83c2937473e102e21df74c10"
+  dependencies:
+    lru-cache "2.2.x"
+    tmp "0.0.x"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+void-elements@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/void-elements/-/void-elements-2.0.1.tgz#c066afb582bb1cb4128d60ea92392e94d5e9dbec"
+
+webidl-conversions@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
+
+webidl-conversions@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.1.tgz#8015a17ab83e7e1b311638486ace81da6ce206a0"
+
+whatwg-encoding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.1.tgz#3c6c451a198ee7aec55b1ec61d0920c67801a5f4"
+  dependencies:
+    iconv-lite "0.4.13"
+
+whatwg-url@^4.3.0:
+  version "4.7.1"
+  resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-4.7.1.tgz#df4dc2e3f25a63b1fa5b32ed6d6c139577d690de"
+  dependencies:
+    tr46 "~0.0.3"
+    webidl-conversions "^3.0.0"
+
+which@~1.2.2:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wordwrap@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
+
+wrappy@1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+ws@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.2.tgz#8a244fa052401e08c9886cf44a85189e1fd4067f"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xml-name-validator@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-2.0.1.tgz#4d8b8f1eccd3419aa362061becef515e1e559635"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+yargs@~3.5.4:
+  version "3.5.4"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.5.4.tgz#d8aff8f665e94c34bd259bdebd1bfaf0ddd35361"
+  dependencies:
+    camelcase "^1.0.2"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+    wordwrap "0.0.2"
+
+yauzl@2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-2.4.1.tgz#9528f442dab1b2284e58b4379bb194e22e0c4005"
+  dependencies:
+    fd-slicer "~1.0.1"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/contrib/views/pom.xml b/contrib/views/pom.xml
index 6dcfce2..402f7cd 100644
--- a/contrib/views/pom.xml
+++ b/contrib/views/pom.xml
@@ -112,6 +112,7 @@
             <exclude>**/assets/static/javascripts/**</exclude>
             <exclude>**/assets/static/stylesheets/**</exclude>
             <exclude>storm/src/main/resources/**</exclude>
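+            <!-- yarn.lock files are generated by Yarn; exclude them from the source license check. -->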
+            <exclude>**/yarn.lock</exclude>
           </excludes>
         </configuration>
       </plugin>
diff --git a/contrib/views/storm/src/main/java/org/apache/ambari/storm/ProxyServlet.java b/contrib/views/storm/src/main/java/org/apache/ambari/storm/ProxyServlet.java
index 6e6bea2..f87b29f 100644
--- a/contrib/views/storm/src/main/java/org/apache/ambari/storm/ProxyServlet.java
+++ b/contrib/views/storm/src/main/java/org/apache/ambari/storm/ProxyServlet.java
@@ -38,6 +38,10 @@
 public class ProxyServlet extends HttpServlet {
 
   private ViewContext viewContext;
+  private static final String STORM_HOST = "storm.host";
+  private static final String STORM_PORT = "storm.port";
+  private static final String STORM_SSL_ENABLED = "storm.sslEnabled";
+  private String stormURL;
 
   @Override
   public void init(ServletConfig config) throws ServletException {
@@ -45,12 +49,16 @@
 
     ServletContext context = config.getServletContext();
     viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
+    String sslEnabled = viewContext.getProperties().get(STORM_SSL_ENABLED);
+    String hostname = viewContext.getProperties().get(STORM_HOST);
+    String port = viewContext.getProperties().get(STORM_PORT);
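+    // Resolve the Storm UI base URL once from the view instance properties.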
+    stormURL = ("true".equals(sslEnabled) ? "https" : "http") + "://" + hostname + ":" + port;
   }
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
     InputStream body = null;
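+    // The "url" request parameter is now a Storm-relative path; prefixing the
+    // configured Storm host keeps the proxy from fetching arbitrary URLs.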
-    String urlToRead = URLDecoder.decode(request.getParameter("url"));
+    String urlToRead = stormURL + URLDecoder.decode(request.getParameter("url"), "UTF-8");
     HashMap<String,String> headersMap = this.getHeaders(request);
     InputStream resultStream = viewContext.getURLStreamProvider().readAsCurrent(urlToRead, "GET", body, headersMap);
     this.setResponse(request, response, resultStream);
@@ -59,7 +67,7 @@
   @Override
   protected void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException {
     InputStream stream = request.getInputStream();
-    String urlToRead = URLDecoder.decode(request.getParameter("url"));
+    String urlToRead = stormURL + URLDecoder.decode(request.getParameter("url"), "UTF-8");
     HashMap<String,String> headersMap = this.getHeaders(request);
     InputStream resultStream = viewContext.getURLStreamProvider().readAsCurrent(urlToRead, "POST", stream, headersMap);
     this.setResponse(request, response, resultStream);
diff --git a/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java b/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java
new file mode 100644
index 0000000..42c3277
--- /dev/null
+++ b/contrib/views/storm/src/main/java/org/apache/ambari/storm/StormDetailsServlet.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.storm;
+
+import org.apache.ambari.view.ViewContext;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.nio.charset.StandardCharsets;
+import java.util.Scanner;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+
+/**
+ * Simple servlet that returns the configured Storm host details to the view UI.
+ */
+public class StormDetailsServlet extends HttpServlet {
+
+  private ViewContext viewContext;
+  private static final String STORM_HOST = "storm.host";
+  private static final String STORM_PORT = "storm.port";
+  private static final String STORM_SSL_ENABLED = "storm.sslEnabled";
+  private String stormURL;
+
+  @Override
+  public void init(ServletConfig config) throws ServletException {
+    super.init(config);
+
+    ServletContext context = config.getServletContext();
+    viewContext = (ViewContext) context.getAttribute(ViewContext.CONTEXT_ATTRIBUTE);
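+    // Mirrors ProxyServlet.init(): derive the Storm UI base URL from the view
+    // instance properties.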
+    String sslEnabled = viewContext.getProperties().get(STORM_SSL_ENABLED);
+    String hostname = viewContext.getProperties().get(STORM_HOST);
+    String port = viewContext.getProperties().get(STORM_PORT);
+    stormURL = ("true".equals(sslEnabled) ? "https" : "http") + "://" + hostname + ":" + port;
+  }
+
+  @Override
+  protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
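+    // Hand the UI a small JSON document naming the Storm host, e.g.
+    // {"hostdata":"http://storm-host:8744"}.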
+    String hostDetails = "{\"hostdata\":\""+stormURL+"\"}";
+    InputStream resultStream = new ByteArrayInputStream(hostDetails.getBytes(StandardCharsets.UTF_8));
+    this.setResponse(request, response, resultStream);
+  }
+
+  /**
+   * Writes the result stream to the servlet response, answering 404 when the
+   * body is empty or Storm reports a NotFoundException.
+   * @param request      HttpServletRequest
+   * @param response     HttpServletResponse
+   * @param resultStream InputStream carrying the response body
+   */
+  public void setResponse(HttpServletRequest request, HttpServletResponse response, InputStream resultStream) throws IOException {
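+    // Read the whole stream as one token ("\\A" only matches the start of input).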
+    Scanner scanner = new Scanner(resultStream, StandardCharsets.UTF_8.name()).useDelimiter("\\A");
+    String result = scanner.hasNext() ? scanner.next() : "";
+    boolean notFound = result.isEmpty() || result.contains("\"exception\":\"NotFoundException\"");
+    response.setContentType(request.getContentType());
+    response.setStatus(notFound ? HttpServletResponse.SC_NOT_FOUND : HttpServletResponse.SC_OK);
+    PrintWriter writer = response.getWriter();
+    writer.print(result);
+  }
+}
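
A minimal sketch of how view-side code can consume this endpoint. The relative `storm_details` path comes from the web.xml mapping below; the host/port value in the comment is hypothetical, and the response shape is exactly the JSON that `doGet()` serializes:

```js
// Client sketch, assuming the page is served from the view's mount path so
// the relative URL resolves to <mount>/storm_details.
$.get('storm_details').done(function (response) {
  // The servlet does not set an explicit JSON content type, so jQuery may
  // hand back a raw string; parse defensively.
  var data = typeof response === 'string' ? JSON.parse(response) : response;
  console.log(data.hostdata); // e.g. "http://storm-host.example.com:8744"
});
```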
diff --git a/contrib/views/storm/src/main/resources/WEB-INF/web.xml b/contrib/views/storm/src/main/resources/WEB-INF/web.xml
index e406de1..cc89ac7 100644
--- a/contrib/views/storm/src/main/resources/WEB-INF/web.xml
+++ b/contrib/views/storm/src/main/resources/WEB-INF/web.xml
@@ -30,8 +30,16 @@
     <servlet-name>ProxyServlet</servlet-name>
     <servlet-class>org.apache.ambari.view.storm.ProxyServlet</servlet-class>
   </servlet>
+  <servlet>
+    <servlet-name>StormDetailsServlet</servlet-name>
+    <servlet-class>org.apache.ambari.view.storm.StormDetailsServlet</servlet-class>
+  </servlet>
   <servlet-mapping>
     <servlet-name>ProxyServlet</servlet-name>
     <url-pattern>/proxy</url-pattern>
   </servlet-mapping>
+  <servlet-mapping>
+    <servlet-name>StormDetailsServlet</servlet-name>
+    <url-pattern>/storm_details</url-pattern>
+  </servlet-mapping>
 </web-app>
diff --git a/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx b/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
index b37170c..1581a16 100644
--- a/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
+++ b/contrib/views/storm/src/main/resources/scripts/components/SearchLogs.jsx
@@ -25,7 +25,7 @@
 		getInitialState: function() {
 			return null;
 		},
-		render: function() {			
+		render: function() {
 			return (
 				<div className="col-md-3 pull-right searchbar">
                     <div className="input-group">
@@ -60,26 +60,30 @@
             var searchBoxEl = document.getElementById('searchBox');
             var searchArchivedLogsEl = document.getElementById('searchArchivedLogs');
             var deepSearchEl = document.getElementById('deepSearch');
+            var topologyId = this.props.id;
 
-            var url = App.baseURL.split('?url=')[1]+'/';
-            if(deepSearchEl.checked == true){
-                url += "deep_search_result.html";
-            }else{
-                url += "search_result.html";
-            }
-            url += '?search='+searchBoxEl.value+'&id='+ this.props.id +'&count=1';
-            if(searchArchivedLogsEl.checked == true){
+            $.get(App.baseURL.replace('proxy?url=', 'storm_details'))
+              .success(function(response){
+                var url = JSON.parse(response).hostdata+'/';
                 if(deepSearchEl.checked == true){
-                    url += "&search-archived=on";
+                    url += "deep_search_result.html";
                 }else{
-                    url += "&searchArchived=checked";
+                    url += "search_result.html";
                 }
-            }
-            window.open(url, '_blank');
+                url += '?search='+searchBoxEl.value+'&id='+ topologyId +'&count=1';
+                if(searchArchivedLogsEl.checked == true){
+                    if(deepSearchEl.checked == true){
+                        url += "&search-archived=on";
+                    }else{
+                        url += "&searchArchived=checked";
+                    }
+                }
+                window.open(url, '_blank');
 
-            searchBoxEl.value = '';
-            searchArchivedLogsEl.checked = false;
-            deepSearchEl.checked = false;
+                searchBoxEl.value = '';
+                searchArchivedLogsEl.checked = false;
+                deepSearchEl.checked = false;
+              });
         },
-    }); 
+    });
 });
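
To make the rewritten handler concrete, this is the kind of URL it now opens once `storm_details` resolves; all values below are hypothetical stand-ins for the search box, topology id, and host details:

```js
// hostdata as returned by the storm_details endpoint (hypothetical value):
var hostdata = 'http://storm-host.example.com:8744';
// A plain (non-deep) search for "timeout" on topology "WordCount-1-1"
// with "search archived logs" checked produces:
var url = hostdata + '/' + 'search_result.html'
        + '?search=timeout&id=WordCount-1-1&count=1'
        + '&searchArchived=checked';
window.open(url, '_blank');
```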
diff --git a/contrib/views/storm/src/main/resources/scripts/router/Router.js b/contrib/views/storm/src/main/resources/scripts/router/Router.js
index cd498c0..be6943e 100644
--- a/contrib/views/storm/src/main/resources/scripts/router/Router.js
+++ b/contrib/views/storm/src/main/resources/scripts/router/Router.js
@@ -22,7 +22,7 @@
 	'react',
 	'react-dom',
 	'utils/Utils'
-], function(require, Backbone, React, ReactDOM, Utils) {	
+], function(require, Backbone, React, ReactDOM, Utils) {
 	'use strict';
 	var rRender;
 	var AppRouter = Backbone.Router.extend({
@@ -37,7 +37,7 @@
 			'*actions'											: 'defaultAction'
 		},
 		initialize: function() {
-			App.baseURL = Utils.getStormHostDetails();
+			App.baseURL = location.pathname+'proxy?url=';
 			this.showRegions();
 			this.listenTo(this, "route", this.postRouteExecute, this);
 		},
@@ -82,7 +82,7 @@
 		/**
 		 * Define route handlers here
 		 */
-		
+
 		dashboardAction: function(){
 			require(['jsx!views/Dashboard'], function(DashboardView){
 				ReactDOM.render(React.createElement(DashboardView), App.Container);
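
The effect of the `initialize()` change is that the base URL is derived purely from the page location instead of the removed blocking AJAX helper; a sketch with a hypothetical view mount path:

```js
// Stand-in for location.pathname of an Ambari view instance (hypothetical):
var pathname = '/views/Storm/0.1.0/STORM_VIEW/';
var baseURL = pathname + 'proxy?url=';
// Requests are then routed through the view's ProxyServlet:
console.log(baseURL); // => "/views/Storm/0.1.0/STORM_VIEW/proxy?url="
```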
diff --git a/contrib/views/storm/src/main/resources/scripts/utils/Utils.js b/contrib/views/storm/src/main/resources/scripts/utils/Utils.js
index 05f992b..d9a9dd6 100644
--- a/contrib/views/storm/src/main/resources/scripts/utils/Utils.js
+++ b/contrib/views/storm/src/main/resources/scripts/utils/Utils.js
@@ -25,31 +25,6 @@
     'use strict';
     var Utils = {};
 
-    Utils.getStormHostDetails = function() {
-        var url = location.pathname+'proxy?url=';
-        var urlParts = location.pathname.split('/');
-        var apiUrl = '/api/v1/'+urlParts[1]+'/'+urlParts[2]+'/versions/'+urlParts[3]+'/instances/'+urlParts[4];
-        $.ajax({
-            url: apiUrl,
-            cache: false,
-            type: 'GET',
-            async: false,
-            dataType: 'json',
-            success: function(response){
-                var props = response.ViewInstanceInfo.properties;
-                if(props['storm.host'] && props['storm.port']){
-                    url += (props['storm.sslEnabled'] === "true" ? "https://" : "http://")+props['storm.host']+":"+props['storm.port'];
-                } else {
-                    Utils.notifyError("Failed to get storm hostname and port.");
-                }
-            },
-            error: function(error){
-                Utils.notifyError("Failed to get storm hostname and port.");
-            }
-        });
-        return url;
-    };
-
     Utils.ArrayToCollection = function(array, collection){
         if(array.length){
             array.map(function(obj){
diff --git a/contrib/views/storm/src/main/resources/view.xml b/contrib/views/storm/src/main/resources/view.xml
index 0e83ec2..105b3eb 100644
--- a/contrib/views/storm/src/main/resources/view.xml
+++ b/contrib/views/storm/src/main/resources/view.xml
@@ -22,15 +22,15 @@
   <description>Ambari view for Apache Storm</description>
   <parameter>
 	<name>storm.host</name>
-	<description>Enter the Storm host name for accessing Storm. Host must be accessible from Ambari Server.</description>
-	<label>Storm Hostname</label>
+	<description>Enter the Storm UI Server hostname for accessing Storm. Host must be accessible from Ambari Server.</description>
+	<label>Storm UI Server Hostname</label>
 	<placeholder>storm-host.example.com</placeholder>
 	<required>true</required>
   </parameter>
   <parameter>
 	<name>storm.port</name>
-	<description>Enter the Storm port for accessing Storm.</description>
-	<label>Storm Port</label>
+	<description>Enter the Storm UI Server port for accessing Storm.</description>
+	<label>Storm UI Server Port</label>
 	<placeholder>8744</placeholder>
 	<required>true</required>
   </parameter>
diff --git a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
index 714e229..df0fd96 100644
--- a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
+++ b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/ConfigurationBuilder.java
@@ -280,7 +280,8 @@
     String nameservice = uri.getHost();
     String namenodeIDs = getProperty(HDFS_SITE, String.format(HA_NAMENODES_CLUSTER_PROPERTY, nameservice),
       HA_NAMENODES_INSTANCE_PROPERTY);
-    return namenodeIDs != null;
+    LOG.debug("namenodeIDs " + namenodeIDs);
+    return !StringUtils.isEmpty(namenodeIDs);
   }
 
   private static boolean hasPort(String url) throws URISyntaxException {
diff --git a/contrib/views/wfmanager/pom.xml b/contrib/views/wfmanager/pom.xml
index 86c03f9..a38cae5 100644
--- a/contrib/views/wfmanager/pom.xml
+++ b/contrib/views/wfmanager/pom.xml
@@ -137,31 +137,36 @@
 			<plugin>
 				<groupId>com.github.eirslett</groupId>
 				<artifactId>frontend-maven-plugin</artifactId>
-				<version>1.3</version>
+				<version>1.4</version>
 				<configuration>
 					<nodeVersion>v4.5.0</nodeVersion>
-					<npmVersion>2.15.0</npmVersion>
+					<yarnVersion>v0.23.2</yarnVersion>
 					<workingDirectory>src/main/resources/ui/</workingDirectory>
 					<npmInheritsProxyConfigFromMaven>false</npmInheritsProxyConfigFromMaven>
+                    <!-- setting npm_config_tmp environment variable is a workaround for 
+                     https://github.com/Medium/phantomjs/issues/673 -->
+                    <environmentVariables>
+                      <npm_config_tmp>/tmp/npm_config_tmp</npm_config_tmp>
+                    </environmentVariables>
 				</configuration>
 				<executions>
 					<execution>
-						<id>install node and npm</id>
+						<id>install node and yarn</id>
 						<phase>generate-sources</phase>
 						<goals>
-							<goal>install-node-and-npm</goal>
+							<goal>install-node-and-yarn</goal>
 						</goals>
 					</execution>
 					<execution>
-						<id>npm install</id>
+						<id>yarn install --pure-lockfile</id>
 						<phase>generate-sources</phase>
 						<goals>
-							<goal>npm</goal>
+							<goal>yarn</goal>
 						</goals>
 						<configuration>
 							<arguments>install
 								--python="${project.basedir}/../src/main/unix/ambari-python-wrap"
-								--unsafe-perm</arguments>
+								--unsafe-perm --ignore-engines</arguments>
 						</configuration>
 					</execution>
 				</executions>
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
index 99f4fa7..41012b2 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
@@ -47,6 +47,7 @@
   fileBrowser : Ember.inject.service('file-browser'),
   workspaceManager : Ember.inject.service('workspace-manager'),
   jobConfigProperties: Ember.A([]),
+  isDefaultNameForBundleEnabled : false,
   initialize : function(){
     var self = this;
     this.set('errors', Ember.A([]));
@@ -95,7 +96,7 @@
     }else{
       this.set('bundle', this.createBundle());
     }
-    if(Ember.isBlank(this.get('bundle.name'))){
+    if(Ember.isBlank(this.get('bundle.name')) && this.get('isDefaultNameForBundleEnabled')){
       this.set('bundle.name', Ember.copy(this.get('tabInfo.name')));
     }
     this.schedulePersistWorkInProgress();
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
index fba4db5..1aeca5b 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
@@ -51,6 +51,7 @@
   workspaceManager : Ember.inject.service('workspace-manager'),
   showErrorMessage: Ember.computed.alias('saveAttempted'),
   jobConfigProperties: Ember.A([]),
+  isDefaultNameForCoordinatorEnabled : false,
   datasetsForInputs : Ember.computed('coordinator.datasets.[]','coordinator.dataOutputs.[]',function(){
     var datasetsForInputs = Ember.copy(this.get('coordinator.datasets'));
     this.get('coordinator.dataOutputs').forEach((dataOutput)=>{
@@ -145,7 +146,7 @@
       {'name':'throttle', 'displayName':'Throttle', 'value':''}
     ]);
     this.set('timezoneList', Ember.copy(Constants.timezoneList));
-    if(Ember.isBlank(this.get('coordinator.name'))){
+    if(Ember.isBlank(this.get('coordinator.name')) && this.get('isDefaultNameForCoordinatorEnabled')){
       this.set('coordinator.name', Ember.copy(this.get('tabInfo.name')));
     }
     this.schedulePersistWorkInProgress();
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/distcp-action.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/distcp-action.js
index e5740e3..f441203 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/distcp-action.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/distcp-action.js
@@ -32,6 +32,19 @@
       this.set("actionModel.configuration.property", Ember.A([]));
     }
   }.on('init'),
+  distcpCommand : Ember.computed('actionModel.args', {
+    get(key){
+      return this.get('actionModel.args').mapBy('value').join(" ");
+    },
+    set(key, value){
+      this.get('actionModel.args').clear();
+      value.split(" ").forEach(arg => {
+        this.get('actionModel.args').pushObject({value:arg});
+      });
+      return this.get(key);
+    }
+  }),
+
   initialize : function(){
     this.on('fileSelected',function(fileName){
       this.set(this.get('filePathModel'), fileName);
@@ -53,7 +66,7 @@
     },
     register (name, context){
       this.sendAction('register',name , context);
-    },
+    }
 
   }
 });
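
The new `distcpCommand` property is a two-way mapping between the textarea and `actionModel.args`; a sketch of the round trip (the `component` instance and argument values here are hypothetical):

```js
// Setting the command splits it on spaces into arg objects...
component.set('distcpCommand', '-update /source/first /target');
component.get('actionModel.args');
// => [{value: '-update'}, {value: '/source/first'}, {value: '/target'}]
// ...and getting it joins the arg values back into a single string.
component.get('distcpCommand'); // => "-update /source/first /target"
```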
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/drafts-wf.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/drafts-wf.js
index ed448d9..050a3b9 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/drafts-wf.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/drafts-wf.js
@@ -46,7 +46,6 @@
     },
     set(key, value) {
       this.set('recentFiles', value);
-
       var score = 0, condition = true, searchTxt = this.get("search").toLowerCase(), isWorkflow = this.get("isWorkflow"), isCoordinator = this.get("isCoordinator"), isBundle = this.get("isBundle");
       return this.get("recentFiles").filter( (role) => {
         score = 0
@@ -125,7 +124,7 @@
         rec.destroyRecord().then(function () {
           self.get('recentFiles', self.get('store').peekAll("wfproject"));
           self.set("deleteInProgress", false);
-          self.set("deleteMsg", "Workflow successfully deleted.");
+          self.set("deleteMsg", "Successfully removed the item from history");
           self.get('store').unloadRecord(rec);
           self.set('filteredModels', self.get('store').peekAll("wfproject"));
           Ember.run.later(()=>{
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
index 17f21ee..1cfe755 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
@@ -22,7 +22,6 @@
 import {WorkflowImporter} from '../domain/workflow-importer';
 import {WorkflowJsonImporter} from '../domain/workflow-json-importer';
 import {WorkflowContext} from '../domain/workflow-context';
-import {JSPlumbRenderer} from '../domain/jsplumb-flow-renderer';
 import {CytoscapeRenderer} from '../domain/cytoscape-flow-renderer';
 import {FindNodeMixin} from '../domain/findnode-mixin';
 import { validator, buildValidations } from 'ember-cp-validations';
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
index e74c873..70ba41c 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
@@ -327,7 +327,18 @@
                   return "none";
                 }
               },
-              'target-arrow-color': 'data(borderColor)'
+              'target-arrow-color': 'data(borderColor)',
+              'color': '#262626',
+              'font-size': 12,
+              label: function(target) {
+                if (!target.data().transition || !target.data().transition.condition) {
+                  return "";
+                }else if (target.data().transition.condition.length>5){
+                  return target.data().transition.condition.slice(0, 5)+"...";
+                }else{
+                  return target.data().transition.condition;
+                }
+              }
             }
           }
         ],
@@ -354,6 +365,23 @@
         var node = event.cyTarget;
         this.showActionNodeDetail(node, xmlString);
       }.bind(this));
+
+      cy.on('mousemove', 'edge', function(event) {
+        this.get("context").$(".overlay-transition-content, .decision-condition-label").hide();
+        if (event.cyTarget.data().transition && event.cyTarget.data().transition.condition) {
+          this.get("context").$(".decision-condition-body").html(event.cyTarget.data().transition.condition);
+          this.get("context").$(".overlay-transition-content").css({
+            top: event.originalEvent.offsetY + 10,
+            left: event.originalEvent.offsetX + 15
+          });
+          this.get("context").$(".overlay-transition-content, .decision-condition-label").show();
+        }
+      }.bind(this));
+
+      cy.on('mouseout', 'edge',function(event) {
+        this.get("context").$(".overlay-transition-content").hide();
+      }.bind(this));
+
       this.set("model.inProgress", false);
     },
     importSampleWorkflow (){
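
The label callback and the mousemove handler work as a pair: an edge shows at most a five-character preview of its decision condition, and hovering reveals the full text in the overlay. A standalone sketch of the truncation rule (condition strings are hypothetical):

```js
function edgeLabel(condition) {
  if (!condition) {
    return '';                        // no condition, no label
  }
  return condition.length > 5
    ? condition.slice(0, 5) + '...'   // preview only; full text shown on hover
    : condition;
}
edgeLabel('${fs:exists(path)}'); // => "${fs:..."
edgeLabel('true');               // => "true"
```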
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/recent-projects.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/recent-projects.js
index bbc51cc..8ed9e54 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/recent-projects.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/recent-projects.js
@@ -18,10 +18,15 @@
 import Ember from 'ember';
 
 export default Ember.Component.extend({
+  rendered : function(){
+    this.$("#projectsList").on("hidden.bs.modal", function () {
+      this.sendAction("close");
+    }.bind(this));
+  }.on('didInsertElement'),
   actions: {
-  	close(){
-  		this.sendAction('close');
-  	},
+    close() {
+      this.sendAction('close');
+    },
     editWorkflow ( path, type ) {
       this.sendAction('editWorkflow', path, type);
     }
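
The `didInsertElement` hook above covers dismissal paths that never reach the component's `close` action; a sketch of why it is needed (`#projectsList` is the modal element from the component's template, which is not part of this diff):

```js
// Bootstrap 3 fires 'hidden.bs.modal' on every dismissal path, including
// Esc and backdrop clicks that bypass the Close button, so binding once
// on insert keeps the parent informed.
$('#projectsList').on('hidden.bs.modal', function () {
  // In the component this is this.sendAction('close');
  console.log('modal dismissed');
});
```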
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
index 96ee230..ed761c7 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
@@ -161,14 +161,17 @@
         queryParam = "endCreatedTime";
       }
       if (date._isAMomentObject) {
-        var dateFilter = queryParam +"="+ date.format("YYYY-MM-DDThh:mm")+'Z';
+        var dateFilter = queryParam +"="+ date.format("YYYY-MM-DDTHH:mm")+'Z';
         this.filter[queryParam] = dateFilter;
       } else {
         delete this.filter[queryParam];
       }
       this.sendAction('onSearch', { type: this.get('jobType'), filter: this.getAllFilters() });
     },
-
+    doClearFilters(){
+      this.filter={};
+      this.sendAction('onSearch', { type: this.get('jobType'), filter: this.getAllFilters() });
+    },
     getAllFilters(){
       var allFilters = [];
       Object.keys(this.filter).forEach(function(value){
@@ -189,7 +192,10 @@
             this.sendAction('onSearch', { type: type, filter: filter });
         },
         onSearchClicked(){
-          this.$('#search-field').tagsinput('add', 'Name:'+this.$('.tt-input').val());
+          var searchValue=this.$('.tt-input').val();
+          if(!Ember.isBlank(searchValue)) {
+            this.$('#search-field').tagsinput('add', 'Name:'+searchValue);
+          }
         },
         refresh(){
           this.sendAction('onSearch', this.get('history').getSearchParams());
@@ -201,10 +207,17 @@
             this.$("#endDate").trigger("dp.show");
           }
         },
+        clearFilters() {
+          this.$("#startDate").val('');
+          this.$("#endDate").val('');
+          this.$('#search-field').tagsinput('removeAll');
+          this.$('.tt-input').val('');
+          this.doClearFilters();
+        },
         onClear(type) {
           if (type ==='start' && this.get('startDate') === "") {
             this.filterByDate("", type);
-          } else if (type ==='start' && this.get('endDate') === "") {
+          } else if (type ==='end' && this.get('endDate') === "") {
             this.filterByDate("", type);
           }
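
Two of the fixes above are easy to miss in review: `hh` to `HH` matters because moment's `hh` token is a 12-hour clock while the appended `Z` implies a 24-hour UTC-style timestamp, and `onClear` previously tested `'start'` twice, so clearing the end date never re-ran the filter. A sketch of the format difference (the date value is hypothetical; `moment` is the library the component already uses):

```js
var m = moment('2017-05-04 15:30');
m.format('YYYY-MM-DDThh:mm'); // => "2017-05-04T03:30" (12-hour, wrong with a trailing Z)
m.format('YYYY-MM-DDTHH:mm'); // => "2017-05-04T15:30" (24-hour, as intended)
```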
 
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/jsplumb-flow-renderer.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/jsplumb-flow-renderer.js
deleted file mode 100644
index c3e3133..0000000
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/jsplumb-flow-renderer.js
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
-*    Licensed to the Apache Software Foundation (ASF) under one or more
-*    contributor license agreements.  See the NOTICE file distributed with
-*    this work for additional information regarding copyright ownership.
-*    The ASF licenses this file to You under the Apache License, Version 2.0
-*    (the "License"); you may not use this file except in compliance with
-*    the License.  You may obtain a copy of the License at
-*
-*        http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS,
-*    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-*    See the License for the specific language governing permissions and
-*    limitations under the License.
-*/
-
-import Ember from 'ember';
-import Constants from '../utils/constants';
-import {DefaultLayoutManager as LayoutManager} from '../domain/default-layout-manager';
-var JSPlumbRenderer= Ember.Object.extend({
-  designerPlumb:null,
-  flattenedNodes:null,
-  _createConnection(sourceNode,target,transition){
-    var connectionColor="#777";
-    var lineWidth=1;
-    if (transition.condition){
-      if(transition.condition==="default"){
-        lineWidth=2;
-      }else if (transition.condition==="error"|| transition.errorPath){
-        connectionColor=Constants.globalSetting.errorTransitionColor;
-      }
-    }
-    var connectionObj={
-      source:sourceNode.id,
-      target:target.id,
-      connector:["Straight"],
-      paintStyle:{lineWidth:lineWidth,strokeStyle:connectionColor},
-      endpointStyle:{fillStyle:'rgb(243,229,0)'},
-      endpoint: ["Dot", {
-        radius: 1
-      }],
-      alwaysRespectStubs:true,
-      anchors: [["Bottom"],["Top"]],
-      overlays:[]
-    };
-    return connectionObj;
-  },
-  _getAddNodeOverlay(context,sourceNode,target,transition){
-    var location=target.type==="placeholder"?1:0.5;
-    var transitionCount=sourceNode.transitions.length;
-    return {
-      id: sourceNode.id+"_"+target.id+"_"+"connector",
-      location:location,
-      /* jshint unused:vars */
-      create:function(component) {
-        var container=Ember.$('<div />');
-        var plus= Ember.$('<div class="fa fa-plus connector_overlay_new"></div>');
-        if ((sourceNode.isDecisionNode() && transitionCount>1 ||sourceNode.isForkNode() && transitionCount>2 ) &&
-        target.isPlaceholder() &&
-        !transition.isDefaultCasePath()){
-          var trash=Ember.$('<div class="node_actions node_left"><i class="fa fa-trash-o"></i></div>');
-          trash.on("click",function(){
-            context.deleteTransition(transition);
-          });
-          plus.append(trash);
-        }
-        container.append(plus);
-        return container;
-      },
-      events:{
-        click:function(labelOverlay, originalEvent) {
-          var element = originalEvent.target;
-          context.set('popOverElement', element);
-          context.setCurrentTransition(transition);
-          context.showWorkflowActionSelect(element);
-        }
-      }
-    };
-  },
-
-  _renderNodes(node,visitedNodes){
-    if (!node || node.isKillNode()){
-      return;
-    }
-    if (visitedNodes.contains(node)){
-      return;
-    }
-    visitedNodes.push(node);
-    if(!this.get("flattenedNodes").contains(node)){
-      this.get("flattenedNodes").pushObject(node);
-    }
-    if (node.transitions.length > 0){
-      node.transitions.forEach(function(transition) {
-        var target = transition.targetNode;
-        this._renderNodes(target,visitedNodes);
-      }.bind(this));
-    }
-  },
-  _connectNodes(context,sourceNode){
-    var connections=[];
-    var visitedNodes=[];
-    this._renderTransitions(sourceNode,connections,visitedNodes,context);
-    this._layout(connections);
-    this.designerPlumb.setSuspendDrawing(true);
-    this.designerPlumb.batch(function(){
-      connections.forEach(function(conn){
-        this.designerPlumb.connect(conn);
-      }.bind(this));
-    }.bind(this));
-    this.designerPlumb.setSuspendDrawing(false,true);
-
-  },
-  _renderTransitions(sourceNode,connections,visitedNodes,context){
-    var self=this;
-    if(!sourceNode){
-      return;
-    }
-    if (visitedNodes.contains(sourceNode)){
-      return;
-    }
-    if (sourceNode.hasTransition() ){
-      sourceNode.transitions.forEach(function(transition) {
-        var target = transition.targetNode;
-        if (target.isKillNode() || !Constants.showErrorTransitions && transition.isOnError()){
-          return;
-        }
-        var connectionObj=self._createConnection(sourceNode,target,transition);
-
-        if (transition.condition){
-          var conditionHTML = "<div class='decision-condition' title='"+transition.condition+"'>"+ transition.condition+"</div>";
-          connectionObj.overlays.push([ "Label", {label:conditionHTML, location:0.75, id:"myLabel" } ]);
-        }
-        if (!target.isPlaceholder()){
-          connectionObj.overlays.push(["PlainArrow",{location:-0.1,width: 7,length: 7}]);
-        }
-        if (!(sourceNode.isPlaceholder() || target.isKillNode())){
-          var addNodeoverlay=["Custom" , self._getAddNodeOverlay(context,sourceNode,target,transition)];
-          connectionObj.overlays.push(addNodeoverlay);
-        }
-        connections.push(connectionObj);
-        self._renderTransitions(target,connections,visitedNodes,context);
-      });
-    }
-  },
-  _layout(edges){
-    var nodes = Ember.$(".nodecontainer");
-    this.layoutManager.doLayout(this.get("context"),nodes,edges,this.get("workflow"));
-  },
-  initRenderer(callback,settings){
-    this.designerPlumb=jsPlumb.getInstance({});
-    this.layoutManager=LayoutManager.create({});
-    this.context=settings.context;
-    this.flattenedNodes=settings.flattenedNodes;
-    this.designerPlumb.ready(function() {
-      callback();
-    }.bind(this));
-    return this.designerPlumb;
-  },
-  refresh(){
-    this.designerPlumb.repaintEverything();
-  },
-  reset(){
-    if(!this.get('flattenedNodes')){
-      return;
-    }
-    this.get("flattenedNodes").clear();
-    this.designerPlumb.reset();
-  },
-  cleanup(){
-    if(!this.get('flattenedNodes')){
-      return;
-    }
-    this.get('flattenedNodes').clear();
-    this.designerPlumb.detachEveryConnection();
-  },
-  onDidUpdate(){
-    this._connectNodes(this.get("context"),this.get("workflow").startNode,this.get("workflow"));
-  },
-  renderWorkflow(workflow){
-    var visitedNodes=[];
-    this.set("workflow",workflow);
-    this._renderNodes(this.get("workflow").startNode,visitedNodes);
-  },
-
-  getBottomPosition(){
-    return {
-      top : this.get("context").$(".nodeEnd").offset().top,
-      left : this.get("context").$(".nodeEnd").offset().left
-    };
-  }
-
-});
-export {JSPlumbRenderer};
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager1.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager1.js
deleted file mode 100644
index 0cd306a..0000000
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager1.js
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-*    Licensed to the Apache Software Foundation (ASF) under one or more
-*    contributor license agreements.  See the NOTICE file distributed with
-*    this work for additional information regarding copyright ownership.
-*    The ASF licenses this file to You under the Apache License, Version 2.0
-*    (the "License"); you may not use this file except in compliance with
-*    the License.  You may obtain a copy of the License at
-*
-*        http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS,
-*    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-*    See the License for the specific language governing permissions and
-*    limitations under the License.
-*/
-import Ember from 'ember';
-var LayoutManager1= Ember.Object.extend({
-  doLayout(component,nodes,edges,workflow){
-    var levelMatrix = [];
-    var adjancencyMatrix = {};
-    for (var i = 0; i < edges.length; i++) {
-      var c = edges[i];
-      if(!adjancencyMatrix[c.source.id]){
-        adjancencyMatrix[c.source.id] = [];
-      }
-      adjancencyMatrix[c.source.id].push(c.target.id);
-    }
-    var bfsArray = this.doBFS(nodes[0].id, adjancencyMatrix);
-    var level = 0;
-    bfsArray.forEach((item, index)=>{
-      if(!adjancencyMatrix[item]){
-        return;
-      }
-      adjancencyMatrix[item].forEach((value)=>{
-        if(!levelMatrix[level]){
-          levelMatrix[level] = [];
-        }
-        levelMatrix[level].push(value);
-      });
-      level++;
-    });
-    var startNodeOffset = component.$("#node-start").offset();
-    var top = Math.floor(startNodeOffset.top);
-    var left = Math.floor(startNodeOffset.left);
-    levelMatrix.forEach((nodeArray, level)=>{
-      var levelLength = nodeArray.length;
-      var levelSplit = left/levelLength;
-      nodeArray.forEach((node, idx, array)=>{
-        if(levelLength == 1){
-          Ember.$("#" + node).css("top", top+(level*100)+ "px");
-        }else{
-          Ember.$("#" + node).css("top", top+ "px");
-          if(idx < levelLength/2){
-            Ember.$("#" + node).css("left", left-(idx*100) + "px");
-          }else if(idx === levelLength/2){
-            Ember.$("#" + node).css("left", left + "px");
-          }else{
-            Ember.$("#" + node).css("left", left+(idx*100) + "px");
-          }
-        }
-      });
-    });
-  },
-  doBFS (root, adjancencyMatrix){
-    var bfsResult = [];
-    var level = 0;
-    var visited = {};
-    visited[root] = true;
-    var queue = [];
-    queue.push(root);
-    while(queue.length !== 0){
-      root = queue.shift();
-      bfsResult.push(root);
-      if(!adjancencyMatrix[root]){
-        continue;
-      }
-      adjancencyMatrix[root].forEach(function(node){
-        if(!visited[node]){
-          visited[node] = true;
-          queue.push(node);
-        }
-      });
-    }
-    return bfsResult;
-  },
-});
-export {LayoutManager1};
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager2.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager2.js
deleted file mode 100644
index d82b89e..0000000
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/layout-manager2.js
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-*    Licensed to the Apache Software Foundation (ASF) under one or more
-*    contributor license agreements.  See the NOTICE file distributed with
-*    this work for additional information regarding copyright ownership.
-*    The ASF licenses this file to You under the Apache License, Version 2.0
-*    (the "License"); you may not use this file except in compliance with
-*    the License.  You may obtain a copy of the License at
-*
-*        http://www.apache.org/licenses/LICENSE-2.0
-*
-*    Unless required by applicable law or agreed to in writing, software
-*    distributed under the License is distributed on an "AS IS" BASIS,
-*    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-*    See the License for the specific language governing permissions and
-*    limitations under the License.
-*/
-import Ember from 'ember';
-var LayoutManager1= Ember.Object.extend({
-  doLayout(component,nodes,edges,workflow){
-    var levelMatrix = [];
-    var adjancencyMatrix = {};
-    for (var i = 0; i < edges.length; i++) {
-      var c = edges[i];
-      if(!adjancencyMatrix[c.source.id]){
-        adjancencyMatrix[c.source.id] = [];
-      }
-      adjancencyMatrix[c.source.id].push(c.target.id);
-    }
-    var bfsArray = this.doBFS(nodes[0].id, adjancencyMatrix);
-    var level = 0;
-    levelMatrix[level] = [];
-    levelMatrix[level++].push(nodes[0].id);
-    bfsArray.forEach((item, index)=>{
-      if(!adjancencyMatrix[item]){
-        return;
-      }
-      adjancencyMatrix[item].forEach((value)=>{
-        if(!levelMatrix[level]){
-          levelMatrix[level] = [];
-        }
-        levelMatrix[level].push(value);
-      });
-      level++;
-    });
-    var top = 0;
-    var left = 400;
-    var startNodeWidth = component.$("#"+nodes[0].id).width();
-    var center = left+(150-Math.floor(startNodeWidth/2));
-    levelMatrix.forEach((nodeArray, level)=>{
-      var levelLength = nodeArray.length;
-      nodeArray.forEach((node, idx, array)=>{
-        Ember.$("#" + node).css("top", top+(level*100)+ "px");
-        var nodeWidth=Math.round(component.$("#" + node).width()/10) * 10;
-        var avgPositionChange = 0;
-        var totalPositions = ((levelLength-1)*(levelLength)/2)*100;
-        var displacement = 150-Math.floor(nodeWidth/2);
-        var avgPositionChange = (totalPositions/levelLength);
-        var eltPosition = idx*100 - avgPositionChange;
-        var total = left + eltPosition + displacement;
-        Ember.$("#" + node).css("left", total + "px");
-      });
-    });
-  },
-  doBFS (root, adjancencyMatrix){
-    var bfsResult = [];
-    var level = 0;
-    var visited = {};
-    visited[root] = true;
-    var queue = [];
-    queue.push(root);
-    while(queue.length !== 0){
-      root = queue.shift();
-      bfsResult.push(root);
-      if(!adjancencyMatrix[root]){
-        continue;
-      }
-      adjancencyMatrix[root].forEach(function(node){
-        if(!visited[node]){
-          visited[node] = true;
-          queue.push(node);
-        }
-      });
-    }
-    return bfsResult;
-  },
-});
-export {LayoutManager1};
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/routes/design/jobtab.js b/contrib/views/wfmanager/src/main/resources/ui/app/routes/design/jobtab.js
index 3fecbaa..9adf3ae 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/routes/design/jobtab.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/routes/design/jobtab.js
@@ -37,7 +37,7 @@
     return deferred.promise;
   },
   model : function(params){
-    return this.getJobInfo(Ember.ENV.API_URL+'/v2/job/'+params.id+'?show=info&timezone=GMT&offset=1').catch(function(){
+    return this.getJobInfo(Ember.ENV.API_URL+'/v2/job/'+params.id+'?show=info&timezone=GMT&offset=1&len=1000').catch(function(){
         return {error : "Remote API Failed"};
       }).then(function(response){
       if (typeof response === "string") {
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
index 3c5e720..0603b57 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
@@ -1813,3 +1813,14 @@
   overflow-y: scroll;
   margin-bottom: 10px;
 }
+
+#coord-actions-list-body-conatiner {
+  max-height: 350px;
+  overflow-y: scroll;
+  margin-bottom: 10px;
+}
+
+.note-info {
+  position: relative;
+  top: 10px;
+}
\ No newline at end of file
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
index b674990..0ea6c4b 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
@@ -121,7 +121,7 @@
   </div>
 </div>
 {{#if showingFileBrowser}}
-  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath}}
+  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath hideUpload=true}}
 {{/if}}
 {{#if showingJobConfig}}
   {{job-config type='bundle' closeJobConfigs="closeBundleSubmitConfig" jobFilePath=bundleFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=bundleConfigs containsParameteriedPaths=containsParameteriedPaths jobConfigProperties=jobConfigProperties}}
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
index 3b1b6a9..8f88b88 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
@@ -332,7 +332,7 @@
 </div>
 </div>
 {{#if showingFileBrowser}}
-{{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath}}
+{{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=filePath hideUpload=true}}
 {{/if}}
 {{#if showingJobConfig}}
   {{job-config type='coord' closeJobConfigs="closeCoordSubmitConfig"
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-job-details.hbs
index b3f1097..18a17d1 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-job-details.hbs
@@ -82,36 +82,42 @@
     </div>
   </div>
   <div role="tabpanel" class="tab-pane" id="jobAction">
-    <table id="action-list" class="table listing job-listing table-striped table-hover table-bordered">
-      <thead>
-        <tr>
-          <th>Id</th>
-          <th>Status</th>
-          <th>External Id</th>
-          <th>Error Code</th>
-          <th>Created Time</th>
-          <th>Nominal Time</th>
-          <th>Last Modified Time</th>
-        </tr>
-      </thead>
-      <tbody>
-        {{#each model.actions as |actionInfo|}}
-        <tr class="{{if (eq actionInfo model.actionDetails) "active"}}">
-          {{#if actionInfo.externalId}}
-            <td class="pointer action-link" {{action 'showWorkflow' actionInfo.externalId}}>{{actionInfo.id}}</td>
-          {{else}}
-            <td>{{actionInfo.id}}</td>
-          {{/if}}
-          <td>{{actionInfo.status}}</td>
-          <td>{{actionInfo.externalId}}</td>
-          <td>{{actionInfo.errorCode}}</td>
-          <td>{{actionInfo.createdTime}}</td>
-          <td>{{actionInfo.nominalTime}}</td>
-          <td>{{actionInfo.lastModifiedTime}}</td>
-        </tr>
-        {{/each}}
-      </tbody>
-    </table>
+    <div id="actions-list-header-conatiner">
+      <table id="actions-list-header" class="table listing job-listing table-striped table-hover table-bordered">
+        <thead>
+          <tr>
+            <th class="col-xs-2">Id</th>
+            <th class="col-xs-1">Status</th>
+            <th class="col-xs-2">External Id</th>
+            <th class="col-xs-1">Error Code</th>
+            <th class="col-xs-2">Created Time</th>
+            <th class="col-xs-2">Nominal Time</th>
+            <th class="col-xs-2">Last Modified Time</th>
+          </tr>
+        </thead>
+      </table>
+    </div>
+    <div id="coord-actions-list-body-conatiner">
+      <table id="actions-list-body" class="table listing job-listing table-striped table-hover table-bordered">
+        <tbody>
+          {{#each model.actions as |actionInfo|}}
+          <tr class="{{if (eq actionInfo model.actionDetails) "active"}}">
+            {{#if actionInfo.externalId}}
+              <td class="pointer action-link col-xs-2" {{action 'showWorkflow' actionInfo.externalId}}>{{actionInfo.id}}</td>
+            {{else}}
+              <td class="col-xs-2">{{actionInfo.id}}</td>
+            {{/if}}
+            <td class="col-xs-1">{{actionInfo.status}}</td>
+            <td class="col-xs-2">{{actionInfo.externalId}}</td>
+            <td class="col-xs-1">{{actionInfo.errorCode}}</td>
+            <td class="col-xs-2">{{actionInfo.createdTime}}</td>
+            <td class="col-xs-2">{{actionInfo.nominalTime}}</td>
+            <td class="col-xs-2">{{actionInfo.lastModifiedTime}}</td>
+          </tr>
+          {{/each}}
+        </tbody>
+      </table>
+    </div>
   </div>
   <div role="tabpanel" class="tab-pane" id="jobDefinition">
     <div class="panel panel-default">
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/distcp-action.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/distcp-action.hbs
index 28e67da..a656413 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/distcp-action.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/distcp-action.hbs
@@ -18,7 +18,12 @@
 <div class="panel panel-default">
   <div class="panel-heading">General</div>
   <div class="panel-body">
-    {{#arg-config args=actionModel.args register="register" title="arg"}}{{/arg-config}}
+    <div class="form-group">
+      <label for="distcp-command" class="control-label col-xs-2">Distcp Arguments</label>
+      <div class="col-xs-7">
+        {{textarea class="form-control query-text-area" value=distcpCommand name="distcp-command" placeholder="Example: -update hdfs://nn1:9820/source/first hdfs://nn1:9820/source/second hdfs://nn2:9820/target"}}
+      </div>
+    </div>
     <div class="form-group">
       <label for="inputPassword" class="control-label col-xs-2">Java opts</label>
       <div class="col-xs-7">
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
index ac28de38..f66b3ad 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/drafts-wf.hbs
@@ -114,7 +114,9 @@
       </div>
       <div class="modal-body">
          {{#unless deleteMsg}}
-           <label>Do you want to delete the draft?</label>
+           <label>Do you want to remove this item from the history?</label>
+           <br/>
+           <div class="note-info">NOTE: This file will still be accessible under {{currentDraft.workflowDefinitionPath}}</div>
          {{/unless}}
          {{#if true}}
            {{spin-spinner lines=7 length=3 width=3 radius=3 top=-10 left=150}}
@@ -122,10 +124,10 @@
          <div><label>{{deleteMsg}}</label></div>
       </div>
       <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
         {{#unless deleteMsg}}
           <button type="button" class="btn btn-primary" {{action "deleteWorkflow"}}>Delete</button>
         {{/unless}}
-        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
       </div>
     </div>
   </div>
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
index 429e874..8c07d6d 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
@@ -279,7 +279,7 @@
   {{#global-config closeGlobalConfig="closeWorkflowGlobalProps" saveGlobalConfig="saveGlobalConfig" actionModel=globalConfig}}{{/global-config}}
 {{/if}}
 {{#if showingFileBrowser}}
-  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=workflowFilePath}}
+  {{hdfs-browser closeFileBrowser="closeFileBrowser" selectFileCallback=selectFileCallback filePath=workflowFilePath hideUpload=true}}
 {{/if}}
 {{#if showingActionSettingsFileBrowser}}
   {{hdfs-browser closeFileBrowser="closeActionSettingsFileBrowser" selectFileCallback=selectFileCallback filePath=actionSettingsFilePath}}
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
index 44f25c8..5779e36 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/hdfs-browser.hbs
@@ -25,30 +25,32 @@
           </div>
       </div>
       <div class="panel-default panel-files">
-        <div class="panel-heading">
-          <div class="row">
-             <div class="col-xs-12">
-                <div class="pull-right">
-                  <!-- <span class="">
-                      <button type="button" class="btn btn-default" {{action "createFolder"}} disabled={{isFilePathInvalid}}>Create Folder</button>
-                  </span> -->
-                  {{#unless uploadSelected}}
-                    <span>
-                      <button type="button" class="btn btn-default" {{action "uploadSelect"}} disabled={{isFilePathInvalid}}>Upload File</button>
-                    </span>
-                  {{/unless}}
-                  {{#if uploadSelected}}
-                  <span class="">
-                      <span class="file-upload-control">
-                        {{file-upload url="/upload" selectedPath=selectedPath uploadFailure="uploadFailure" uploadSuccess="uploadSuccess" uploadValidation="uploadValidation"}}
+        {{#unless hideUpload}}
+          <div class="panel-heading">
+            <div class="row">
+               <div class="col-xs-12">
+                  <div class="pull-right">
+                    <!-- <span class="">
+                        <button type="button" class="btn btn-default" {{action "createFolder"}} disabled={{isFilePathInvalid}}>Create Folder</button>
+                    </span> -->
+                    {{#unless uploadSelected}}
+                      <span>
+                        <button type="button" class="btn btn-default" {{action "uploadSelect"}} disabled={{isFilePathInvalid}}>Upload File</button>
                       </span>
-                      <button type="button" class="close-icon" {{action "closeUpload"}}>x</button>
-                  </span>
-                  {{/if}}
+                    {{/unless}}
+                    {{#if uploadSelected}}
+                    <span class="">
+                        <span class="file-upload-control">
+                          {{file-upload url="/upload" selectedPath=selectedPath uploadFailure="uploadFailure" uploadSuccess="uploadSuccess" uploadValidation="uploadValidation"}}
+                        </span>
+                        <button type="button" class="close-icon" {{action "closeUpload"}}>x</button>
+                    </span>
+                    {{/if}}
+                  </div>
                 </div>
-              </div>
+            </div>
           </div>
-        </div>
+        {{/unless}}
         <div class="panel-body">
           <div>
             {{#if showUploadSuccess}}
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-row.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-row.hbs
index 4c18d5d..8ce4886 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-row.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-row.hbs
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 }}
-<td class="no-sort first-col">
+<!--td class="no-sort first-col">
   <input id="{{job.id}}" name="{{job.appName}}"  data-status ="{{job.status}}" class="cbox" type="checkbox" value="" {{action 'rowSelected' job.status on="change"}}>
   {{#if job.parentId}}
      {{#link-to 'design.jobtab' (query-params id=targetParentId jobType="coords")}}
@@ -25,7 +25,7 @@
      <a class="padding15pcnt" href="javascript:void(0);"></a>
   {{/if}}
 
-</td>
+</td-->
 <td {{action 'showJobDetails' job.id}} class="pointer action-link"> {{job.appName}}
 </td>
 <td class="{{job.status}}"> {{job.status}} </td>
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
index b9c6029..8bdc768 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
@@ -38,6 +38,10 @@
     <input type='text' class="form-control" value={{endDate}} id='endDate' title="Nominal End Date" placeholder="Nominal End Date" {{action 'onClear' 'end' on="change"}}/>
   </div>
   <div class="form-group">
+    <span title="Clear Filters" {{action 'clearFilters'}} class="fa fa-close fa-1 pointer btn btn-default" aria-hidden="true"></span>
+  </div>
+  <div class="form-group">
     <span class="fa fa-refresh fa-1 pointer btn btn-default" title="Refresh" {{action 'refresh'}} aria-hidden="true"></span>
   </div>
 </div>
+
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-table.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-table.hbs
index 7b5a625..86ab5a5 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-table.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-table.hbs
@@ -28,7 +28,7 @@
 <table id="search-table" class="table search-table listing table-striped table-hover table-bordered" cellspacing="0" width="100%">
   <thead>
     <tr>
-      <th></th>
+      <!--th></th-->
       <th>Name</th>
       <th>Status</th>
       <th>User</th>
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
index 0c5257e..9c940a2 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
@@ -203,6 +203,12 @@
               <div class="cy-note"><div class="pull-right">Click on node to get details</div></div>
             {{/if}}
             <div id="cy" class="cy-panel"></div>
+            <div class="overlay-transition-content">
+              <div class="decision-condition-label">
+                <div class="decision-condition-header">Condition</div>
+                <div class="decision-condition-body"></div>
+              </div>
+            </div>
           </div>
           <div class="col-xs-4">
             {{#if model.nodeName}}
diff --git a/contrib/views/wfmanager/src/main/resources/ui/bower.json b/contrib/views/wfmanager/src/main/resources/ui/bower.json
index 7e1709b..75ac7a7 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/bower.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/bower.json
@@ -7,7 +7,6 @@
     "ember-qunit-notifications": "0.1.0",
     "bootstrap": "~3.3.6",
     "font-awesome": "fontawesome#~4.5.0",
-    "jsPlumb": "2.0.7",
     "jquery-ui": "~1.11.4",
     "dagre": "~0.7.4",
     "x2js": "~1.2.0",
diff --git a/contrib/views/wfmanager/src/main/resources/ui/ember-cli-build.js b/contrib/views/wfmanager/src/main/resources/ui/ember-cli-build.js
index 0979c39..c673c59 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/ember-cli-build.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/ember-cli-build.js
@@ -47,7 +47,7 @@
     app.import('bower_components/bootstrap/dist/css/bootstrap.css');
     app.import('bower_components/bootstrap/dist/js/bootstrap.js');
 
-    app.import('bower_components/jsPlumb/dist/js/jsPlumb-2.0.7.js');
+
 
     app.import('bower_components/bootstrap/dist/fonts/glyphicons-halflings-regular.woff', {
         destDir: 'fonts'
diff --git a/contrib/views/wfmanager/src/main/resources/ui/yarn.lock b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
new file mode 100644
index 0000000..e9ad6cc
--- /dev/null
+++ b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
@@ -0,0 +1,5629 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@1.3.3, accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+ajv@^4.9.1:
+  version "4.11.8"
+  resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536"
+  dependencies:
+    co "^4.6.0"
+    json-stable-stringify "^1.0.1"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.2.tgz#7bee4e112aabeecc2e14429c4ca750c55d8e5ecd"
+
+amd-name-resolver@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.5.tgz#76962dac876ed3311b05d29c6a58c14e1ef3304b"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amd-name-resolver@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.6.tgz#d3e4ba2dfcaab1d820c1be9de947c67828cfe595"
+  dependencies:
+    ensure-posix-path "^1.0.1"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-regex@*, ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+aproba@^1.0.3:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.1.1.tgz#95d3600f07710aa0e9298c726ad5ecf2eacbabab"
+
+archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz#a2d28c93102aa6cc96245a26cb954de06ec53f0c"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-to-error@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-to-error/-/array-to-error-1.1.1.tgz#d68812926d14097a205579a667eeaf1856a44c07"
+  dependencies:
+    array-to-sentence "^1.1.0"
+
+array-to-sentence@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/array-to-sentence/-/array-to-sentence-1.1.0.tgz#c804956dafa53232495b205a9452753a258d39fc"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0, asap@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.2.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.0.tgz#ac3613b1da9bed1b47510bb4651b8931e47146c7"
+
+async@^1.4.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+asynckit@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-code-frame@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.22.0.tgz#027620bee567a88c32561574e7fd0801d33118e4"
+  dependencies:
+    chalk "^1.1.0"
+    esutils "^2.0.2"
+    js-tokens "^3.0.0"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-core@^6.14.0, babel-core@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.24.1.tgz#8c428564dce1e1f41fb337ec34f4c3b022b5ad83"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-generator "^6.24.1"
+    babel-helpers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-register "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    convert-source-map "^1.1.0"
+    debug "^2.1.1"
+    json5 "^0.5.0"
+    lodash "^4.2.0"
+    minimatch "^3.0.2"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+
+babel-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.24.1.tgz#e715f486c58ded25649d888944d52aa07c5d9497"
+  dependencies:
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    detect-indent "^4.0.0"
+    jsesc "^1.3.0"
+    lodash "^4.2.0"
+    source-map "^0.5.0"
+    trim-right "^1.0.1"
+
+babel-helper-builder-binary-assignment-operator-visitor@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz#cce4517ada356f4220bcae8a02c2b346f9a56664"
+  dependencies:
+    babel-helper-explode-assignable-expression "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-call-delegate@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-define-map@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.24.1.tgz#7a9747f258d8947d32d515f6aa1c7bd02204a080"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-explode-assignable-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz#f25b82cf7dc10433c55f70592d5746400ac22caa"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-function-name@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9"
+  dependencies:
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-get-function-arity@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-hoist-variables@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-optimise-call-expression@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-helper-regex@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.24.1.tgz#d36e22fab1008d79d88648e32116868128456ce8"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-helper-remap-async-to-generator@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz#5ec581827ad723fecdd381f1c928390676e4551b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helper-replace-supers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a"
+  dependencies:
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-helpers@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-messages@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-check-es2015-constants@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-debug-macros@^0.1.6:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-debug-macros/-/babel-plugin-debug-macros-0.1.7.tgz#69f5a3dc7d72f781354f18c611a3b007bb223511"
+  dependencies:
+    semver "^5.3.0"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-feature-flags@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-feature-flags/-/babel-plugin-feature-flags-0.3.1.tgz#9c827cf9a4eb9a19f725ccb239e85cab02036fc1"
+
+babel-plugin-filter-imports@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-0.3.1.tgz#e7859b56886b175dd2616425d277b219e209ea8b"
+
+babel-plugin-htmlbars-inline-precompile@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.1.0.tgz#b784723bd1f108796b56faf9f1c05eb5ca442983"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-syntax-async-functions@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95"
+
+babel-plugin-syntax-exponentiation-operator@^6.8.0:
+  version "6.13.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz#9ee7e8337290da95288201a6a57f4170317830de"
+
+babel-plugin-syntax-trailing-function-commas@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz#ba0360937f8d06e40180a43fe0d5616fff532cf3"
+
+babel-plugin-transform-async-to-generator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz#6536e378aff6cb1d5517ac0e40eb3e9fc8d08761"
+  dependencies:
+    babel-helper-remap-async-to-generator "^6.24.1"
+    babel-plugin-syntax-async-functions "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-arrow-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoped-functions@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-block-scoping@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.24.1.tgz#76c295dc3a4741b1665adfd3167215dcff32a576"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    lodash "^4.2.0"
+
+babel-plugin-transform-es2015-classes@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db"
+  dependencies:
+    babel-helper-define-map "^6.24.1"
+    babel-helper-function-name "^6.24.1"
+    babel-helper-optimise-call-expression "^6.24.1"
+    babel-helper-replace-supers "^6.24.1"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-computed-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-destructuring@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-duplicate-keys@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-for-of@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-function-name@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b"
+  dependencies:
+    babel-helper-function-name "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-modules-amd@^6.22.0, babel-plugin-transform-es2015-modules-amd@^6.24.0, babel-plugin-transform-es2015-modules-amd@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154"
+  dependencies:
+    babel-plugin-transform-es2015-modules-commonjs "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-commonjs@^6.23.0, babel-plugin-transform-es2015-modules-commonjs@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.24.1.tgz#d3e310b40ef664a36622200097c6d440298f2bfe"
+  dependencies:
+    babel-plugin-transform-strict-mode "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-modules-systemjs@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23"
+  dependencies:
+    babel-helper-hoist-variables "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-modules-umd@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468"
+  dependencies:
+    babel-plugin-transform-es2015-modules-amd "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+
+babel-plugin-transform-es2015-object-super@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d"
+  dependencies:
+    babel-helper-replace-supers "^6.24.1"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-parameters@^6.23.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b"
+  dependencies:
+    babel-helper-call-delegate "^6.24.1"
+    babel-helper-get-function-arity "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-template "^6.24.1"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-shorthand-properties@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-spread@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-sticky-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-transform-es2015-template-literals@^6.22.0:
+  version "6.22.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-typeof-symbol@^6.23.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372"
+  dependencies:
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-es2015-unicode-regex@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9"
+  dependencies:
+    babel-helper-regex "^6.24.1"
+    babel-runtime "^6.22.0"
+    regexpu-core "^2.0.0"
+
+babel-plugin-transform-exponentiation-operator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz#2ab0c9c7f3098fa48907772bb813fe41e8de3a0e"
+  dependencies:
+    babel-helper-builder-binary-assignment-operator-visitor "^6.24.1"
+    babel-plugin-syntax-exponentiation-operator "^6.8.0"
+    babel-runtime "^6.22.0"
+
+babel-plugin-transform-regenerator@^6.22.0:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.24.1.tgz#b8da305ad43c3c99b4848e4fe4037b770d23c418"
+  dependencies:
+    regenerator-transform "0.9.11"
+
+babel-plugin-transform-strict-mode@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babel-polyfill@^6.16.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.23.0.tgz#8364ca62df8eafb830499f699177466c3b03499d"
+  dependencies:
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-preset-env@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/babel-preset-env/-/babel-preset-env-1.4.0.tgz#c8e02a3bcc7792f23cded68e0355b9d4c28f0f7a"
+  dependencies:
+    babel-plugin-check-es2015-constants "^6.22.0"
+    babel-plugin-syntax-trailing-function-commas "^6.22.0"
+    babel-plugin-transform-async-to-generator "^6.22.0"
+    babel-plugin-transform-es2015-arrow-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoped-functions "^6.22.0"
+    babel-plugin-transform-es2015-block-scoping "^6.23.0"
+    babel-plugin-transform-es2015-classes "^6.23.0"
+    babel-plugin-transform-es2015-computed-properties "^6.22.0"
+    babel-plugin-transform-es2015-destructuring "^6.23.0"
+    babel-plugin-transform-es2015-duplicate-keys "^6.22.0"
+    babel-plugin-transform-es2015-for-of "^6.23.0"
+    babel-plugin-transform-es2015-function-name "^6.22.0"
+    babel-plugin-transform-es2015-literals "^6.22.0"
+    babel-plugin-transform-es2015-modules-amd "^6.22.0"
+    babel-plugin-transform-es2015-modules-commonjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-systemjs "^6.23.0"
+    babel-plugin-transform-es2015-modules-umd "^6.23.0"
+    babel-plugin-transform-es2015-object-super "^6.22.0"
+    babel-plugin-transform-es2015-parameters "^6.23.0"
+    babel-plugin-transform-es2015-shorthand-properties "^6.22.0"
+    babel-plugin-transform-es2015-spread "^6.22.0"
+    babel-plugin-transform-es2015-sticky-regex "^6.22.0"
+    babel-plugin-transform-es2015-template-literals "^6.22.0"
+    babel-plugin-transform-es2015-typeof-symbol "^6.23.0"
+    babel-plugin-transform-es2015-unicode-regex "^6.22.0"
+    babel-plugin-transform-exponentiation-operator "^6.22.0"
+    babel-plugin-transform-regenerator "^6.22.0"
+    browserslist "^1.4.0"
+    invariant "^2.2.2"
+
+babel-register@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.24.1.tgz#7e10e13a2f71065bdfad5a1787ba45bca6ded75f"
+  dependencies:
+    babel-core "^6.24.1"
+    babel-runtime "^6.22.0"
+    core-js "^2.4.0"
+    home-or-tmp "^2.0.0"
+    lodash "^4.2.0"
+    mkdirp "^0.5.1"
+    source-map-support "^0.4.2"
+
+babel-runtime@^6.18.0, babel-runtime@^6.22.0:
+  version "6.23.0"
+  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.23.0.tgz#0a9489f144de70efb3ce4300accdb329e2fc543b"
+  dependencies:
+    core-js "^2.4.0"
+    regenerator-runtime "^0.10.0"
+
+babel-template@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.24.1.tgz#04ae514f1f93b3a2537f2a0f60a5a45fb8308333"
+  dependencies:
+    babel-runtime "^6.22.0"
+    babel-traverse "^6.24.1"
+    babel-types "^6.24.1"
+    babylon "^6.11.0"
+    lodash "^4.2.0"
+
+babel-traverse@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.24.1.tgz#ab36673fd356f9a0948659e7b338d5feadb31695"
+  dependencies:
+    babel-code-frame "^6.22.0"
+    babel-messages "^6.23.0"
+    babel-runtime "^6.22.0"
+    babel-types "^6.24.1"
+    babylon "^6.15.0"
+    debug "^2.2.0"
+    globals "^9.0.0"
+    invariant "^2.2.0"
+    lodash "^4.2.0"
+
+babel-types@^6.19.0, babel-types@^6.24.1:
+  version "6.24.1"
+  resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.24.1.tgz#a136879dc15b3606bda0d90c1fc74304c2ff0975"
+  dependencies:
+    babel-runtime "^6.22.0"
+    esutils "^2.0.2"
+    lodash "^4.2.0"
+    to-fast-properties "^1.0.1"
+
+babel6-plugin-strip-class-callcheck@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-class-callcheck/-/babel6-plugin-strip-class-callcheck-6.0.0.tgz#de841c1abebbd39f78de0affb2c9a52ee228fddf"
+
+babel6-plugin-strip-heimdall@^6.0.1:
+  version "6.0.1"
+  resolved "https://registry.yarnpkg.com/babel6-plugin-strip-heimdall/-/babel6-plugin-strip-heimdall-6.0.1.tgz#35f80eddec1f7fffdc009811dfbd46d9965072b6"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+babylon@^6.11.0, babylon@^6.15.0:
+  version "6.17.0"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.17.0.tgz#37da948878488b9c4e3c4038893fa3314b3fc932"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+bluebird@^3.1.1, bluebird@^3.4.6:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.0.tgz#791420d7f551eea2897453a8a77653f96606d67c"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+bower-endpoint-parser@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower@^1.3.12, bower@^1.7.7:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.8.0.tgz#55dbebef0ad9155382d9e9d3e497c1372345b44a"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@^2.2.0:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.5.0.tgz#f5f66eac962bf9f086286921f0eaeaab6d00d819"
+  dependencies:
+    broccoli-asset-rewrite "^1.1.0"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "^3.0.6"
+
+broccoli-asset-rewrite@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0, broccoli-babel-transpiler@^5.6.2:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-babel-transpiler@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-6.0.0.tgz#a52c5404bf36236849da503b011fd41fe64a00a2"
+  dependencies:
+    babel-core "^6.14.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^2.0.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-clean-css@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-1.1.0.tgz#9db143d9af7e0ae79c26e3ac5a9bb2d720ea19fa"
+  dependencies:
+    broccoli-persistent-filter "^1.1.6"
+    clean-css-promise "^0.1.0"
+    inline-source-map-comment "^1.0.5"
+    json-stable-stringify "^1.0.0"
+
+broccoli-concat@^2.0.4, broccoli-concat@^2.2.0:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-2.3.8.tgz#590cdcc021bb905b6c121d87c2d1d57df44a2a48"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-stew "^1.3.3"
+    fast-sourcemap-concat "^1.0.1"
+    fs-extra "^0.30.0"
+    lodash.merge "^4.3.0"
+    lodash.omit "^4.1.0"
+    lodash.uniq "^4.2.0"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-file-creator@^1.0.0, broccoli-file-creator@^1.0.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-file-creator/-/broccoli-file-creator-1.1.1.tgz#1b35b67d215abdfadd8d49eeb69493c39e6c3450"
+  dependencies:
+    broccoli-kitchen-sink-helpers "~0.2.0"
+    broccoli-plugin "^1.1.0"
+    broccoli-writer "~0.1.1"
+    mkdirp "^0.5.1"
+    rsvp "~3.0.6"
+    symlink-or-copy "^1.0.1"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel-reducer@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel-reducer/-/broccoli-funnel-reducer-1.0.0.tgz#11365b2a785aec9b17972a36df87eef24c5cc0ea"
+
+broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.2.0.tgz#cddc3afc5ff1685a8023488fff74ce6fb5a51296"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.3.0"
+    debug "^2.2.0"
+    exists-sync "0.0.4"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.5.3"
+    heimdalljs "^0.2.0"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.3.1"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-less-single@^0.6.4:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/broccoli-less-single/-/broccoli-less-single-0.6.4.tgz#200316f4146b8cf7e6ab97fc661b8085cc89bdb9"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    include-path-searcher "^0.1.0"
+    less "^2.5.0"
+    lodash.merge "^3.3.2"
+    mkdirp "^0.5.0"
+
+broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5"
+  dependencies:
+    broccoli-plugin "^1.3.0"
+    can-symlink "^1.0.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.5.4"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.3.1.tgz#d02556a135c77dfb859bba7844bc3539be7168e1"
+  dependencies:
+    async-disk-cache "^1.2.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rimraf "^2.6.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sri-hash@^2.1.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-2.1.2.tgz#bc69905ed7a381ad325cc0d02ded071328ebf3f3"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.0.0, broccoli-stew@^1.3.3:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+broccoli@0.16.9:
+  version "0.16.9"
+  resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-0.16.9.tgz#b87ca679f09005c576901a9bc19f5df77efd55a4"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^4.0.4"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+browserslist@^1.4.0:
+  version "1.7.7"
+  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9"
+  dependencies:
+    caniuse-db "^1.0.30000639"
+    electron-to-chromium "^1.2.7"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+caniuse-db@^1.0.30000639:
+  version "1.0.30000664"
+  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000664.tgz#e16316e5fdabb9c7209b2bf0744ffc8a14201f22"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+caseless@~0.12.0:
+  version "0.12.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css-promise@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/clean-css-promise/-/clean-css-promise-0.1.1.tgz#43f3d2c8dfcb2bf071481252cd9b76433c08eecb"
+  dependencies:
+    array-to-error "^1.0.0"
+    clean-css "^3.4.5"
+    pinkie-promise "^2.0.0"
+
+clean-css@^3.4.5:
+  version "3.4.25"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.25.tgz#9e9a52d5c1e6bc5123e1b2783fa65fe958946ede"
+  dependencies:
+    commander "2.8.x"
+    source-map "0.4.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+clone@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb"
+
+cmd-shim@~2.0.1:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+co@^4.6.0:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184"
+
+code-point-at@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.2:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@2.8.x:
+  version "2.8.1"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-emitter@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@^1.4.6:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.0.tgz#0aac662fd52be78964d5532f694784e70110acf7"
+  dependencies:
+    inherits "^2.0.3"
+    readable-stream "^2.2.2"
+    typedarray "^0.0.6"
+
+config-chain@~1.1.9:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.4.0.tgz#c35781d0501d268c25c54b8b17f6240e8a4fb021"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^4.0.1"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+console-control-strings@^1.0.0, console-control-strings@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e"
+
+consolidate@^0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.14.5.tgz#5a25047bc76f73072667c8cb52c989888f494c63"
+  dependencies:
+    bluebird "^3.1.1"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-js@^2.4.0:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.4.1.tgz#4de911e667b0eae9124e34254b53aea6fc618d3e"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+cross-spawn@^5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449"
+  dependencies:
+    lru-cache "^4.0.1"
+    shebang-command "^1.2.0"
+    which "^1.2.9"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@2.2.0, debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debug@2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.6.tgz#a9fa6fbe9ca43cf1e79f73b75c0189cbb7d6db5a"
+  dependencies:
+    ms "0.7.3"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detect-indent@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208"
+  dependencies:
+    repeating "^2.0.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+electron-to-chromium@^1.2.7:
+  version "1.3.8"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.8.tgz#b2c8a2c79bb89fbbfd3724d9555e15095b5f5fb6"
+
+ember-ajax@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ember-ajax/-/ember-ajax-0.7.1.tgz#0b3d1eeb99ed9d9251c013cc6ab6a1e7d4d14507"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-cli-app-version@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.1.tgz#d135eba75f30e791d8a5e5844f1251dcbcc40438"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7:
+  version "5.2.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
+  dependencies:
+    broccoli-babel-transpiler "^5.6.2"
+    broccoli-funnel "^1.0.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.7:
+  version "6.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-6.1.0.tgz#d9c83a7d0c67cc8a3ccb9bd082971c3593e54fad"
+  dependencies:
+    amd-name-resolver "0.0.6"
+    babel-plugin-debug-macros "^0.1.6"
+    babel-plugin-transform-es2015-modules-amd "^6.24.0"
+    babel-polyfill "^6.16.0"
+    babel-preset-env "^1.2.0"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-source "^1.1.0"
+    clone "^2.0.0"
+    ember-cli-version-checker "^1.2.0"
+
+ember-cli-dependency-checker@^1.2.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.4.0.tgz#2b13f977e1eea843fc1a21a001be6ca5d4ef1942"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "^0.2.0"
+    semver "^4.1.0"
+
+ember-cli-file-picker@0.0.9:
+  version "0.0.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-file-picker/-/ember-cli-file-picker-0.0.9.tgz#3aac5b924e963e39841b4508085c15eeac77d8c9"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-cli-htmlbars-inline-precompile@^0.3.1:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.6.tgz#4095fe423f93102724c0725e4dd1a31f25e24de5"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "^0.1.0"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+    hash-for-dep "^1.0.2"
+
+ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-inject-live-reload@^1.3.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.6.1.tgz#82b8f5be454815a75e7f6d42c9ce0bc883a914a3"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-less@^1.5.3:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-less/-/ember-cli-less-1.5.4.tgz#4cfbc05c6f23712fe9665f93be9bc8f2cccb0f71"
+  dependencies:
+    broccoli-less-single "^0.6.4"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    lodash.merge "^3.3.2"
+
+ember-cli-moment-shim@2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-2.0.0.tgz#8a3a5534b5d1d671b084ba2cf6750dbc93fed869"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-stew "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.0.0"
+    exists-sync "0.0.3"
+    lodash.defaults "^3.1.2"
+    moment "^2.13.0"
+    moment-timezone "^0.5.0"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-2.0.0.tgz#45c8b985eba06bb443b3abce1c3c6220fdcb8094"
+  dependencies:
+    broccoli-clean-css "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@^1.2.1:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.4.2.tgz#7ca25495c70ca347106d44fc00f0d7aeca027475"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-concat "^2.2.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    ember-cli-babel "^5.1.5"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sri@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-2.1.1.tgz#971620934a4b9183cf7923cc03e178b83aa907fd"
+  dependencies:
+    broccoli-sri-hash "^2.1.0"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4, ember-cli-version-checker@^1.1.6, ember-cli-version-checker@^1.2.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-2.3.0.tgz#eb33519baf738a2ac90e945b1494bbe6f4d60dd4"
+  dependencies:
+    amd-name-resolver "0.0.2"
+    bower "^1.3.12"
+    bower-config "0.6.1"
+    bower-endpoint-parser "0.2.2"
+    broccoli "0.16.9"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-concat "^2.0.4"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-funnel-reducer "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-viz "^2.0.1"
+    chalk "^1.1.1"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "^1.4.0"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^2.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.26.2"
+    fs-monitor-stack "^1.0.2"
+    fs-tree-diff "^0.4.4"
+    get-caller-file "^1.0.0"
+    git-repo-info "^1.0.4"
+    glob "5.0.13"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.21"
+    lodash "^4.0.0"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.3"
+    merge-defaults "^0.2.1"
+    minimatch "^3.0.0"
+    mkdirp "^0.5.1"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.10"
+    pleasant-progress "^1.0.2"
+    portfinder "^0.4.0"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.5"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rimraf "^2.4.4"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.3"
+    testem "^1.3.0"
+    through "^2.3.6"
+    tiny-lr "0.2.1"
+    tree-sync "^1.0.0"
+    walk-sync "^0.2.6"
+    yam "0.0.18"
+
+ember-cp-validations@2.9.5:
+  version "2.9.5"
+  resolved "https://registry.yarnpkg.com/ember-cp-validations/-/ember-cp-validations-2.9.5.tgz#d3e81f6c6365f87e833af9c1f6fc8f35974f68d2"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.4"
+    ember-getowner-polyfill "^1.0.1"
+    exists-sync "0.0.3"
+    walk-sync "^0.2.0"
+
+ember-data@^2.3.0:
+  version "2.13.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.13.0.tgz#6d61487129de0e72225cc98bbc0d995e2042a933"
+  dependencies:
+    amd-name-resolver "0.0.5"
+    babel-plugin-feature-flags "^0.3.1"
+    babel-plugin-filter-imports "^0.3.1"
+    babel6-plugin-strip-class-callcheck "^6.0.0"
+    babel6-plugin-strip-heimdall "^6.0.1"
+    broccoli-babel-transpiler "^6.0.0"
+    broccoli-file-creator "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^6.0.0-beta.7"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    ember-inflector "^2.0.0"
+    ember-runtime-enumerable-includes-polyfill "^2.0.0"
+    exists-sync "0.0.3"
+    git-repo-info "^1.1.2"
+    heimdalljs "^0.3.0"
+    inflection "^1.8.0"
+    npm-git-info "^1.0.0"
+    semver "^5.1.0"
+    silent-error "^1.0.0"
+
+ember-disable-proxy-controllers@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-disable-proxy-controllers/-/ember-disable-proxy-controllers-1.0.1.tgz#1254eeec0ba025c24eb9e8da611afa7b38754281"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-export-application-global@^1.0.4:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.1.1.tgz#f257d5271268932a89d7392679ce4db89d7154af"
+  dependencies:
+    ember-cli-babel "^5.1.10"
+
+ember-factory-for-polyfill@^1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ember-factory-for-polyfill/-/ember-factory-for-polyfill-1.1.1.tgz#c1124d541a058baaa6681d9611340c16f0baf660"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+    ember-cli-version-checker "^1.2.0"
+
+ember-getowner-polyfill@^1.0.0, ember-getowner-polyfill@^1.0.1:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-getowner-polyfill/-/ember-getowner-polyfill-1.2.3.tgz#ea70f4a48b1c05b91056371d1878bbafe018222e"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.2.0"
+    ember-factory-for-polyfill "^1.1.0"
+
+ember-inflector@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-2.0.0.tgz#ac0870e87c0724bd42cf5ed7ef166c49a296ecfb"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+
+ember-load-initializers@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-load-initializers/-/ember-load-initializers-0.5.1.tgz#76e3db23c111dbdcd3ae6f687036bf0b56be0cbe"
+
+ember-moment@6.1.0:
+  version "6.1.0"
+  resolved "https://registry.yarnpkg.com/ember-moment/-/ember-moment-6.1.0.tgz#5c2e7448e22007f9839c41e05bd3013a9eba2a82"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@^2.0.3:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.1.1.tgz#5e4c1fffe9f5f48fc2194ad7592274ed0cd74f72"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-cli-version-checker "^1.1.6"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-runtime-enumerable-includes-polyfill@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/ember-runtime-enumerable-includes-polyfill/-/ember-runtime-enumerable-includes-polyfill-2.0.0.tgz#6e9ba118bc909d1d7762de1b03a550d8955308a9"
+  dependencies:
+    ember-cli-babel "^6.0.0"
+    ember-cli-version-checker "^1.1.6"
+
+ember-spin-spinner@0.2.4:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/ember-spin-spinner/-/ember-spin-spinner-0.2.4.tgz#6a8d81354bfe2a603565a1cf589db06b9108ab9a"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+ember-truth-helpers@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-1.2.0.tgz#e63cffeaa8211882ae61a958816fded3790d065b"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-uploader@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-uploader/-/ember-uploader-1.0.0.tgz#bce8d2e149292dac74bc1aba29c27e3963bbb35e"
+  dependencies:
+    broccoli-file-creator "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-babel "^5.1.5"
+
+ember-uuid@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-uuid/-/ember-uuid-1.0.0.tgz#0932f2b56d64417cab892d96e3cc69f895356df7"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+"ember-validations@~ 2.0.0-alpha.4":
+  version "2.0.0-alpha.5"
+  resolved "https://registry.yarnpkg.com/ember-validations/-/ember-validations-2.0.0-alpha.5.tgz#95af5bb5fcf43a5d18a6ddebe64594280bf988c0"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+    ember-getowner-polyfill "^1.0.0"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.0.tgz#7b730e4127414087596d9be3c88d2bc5fdb6cf5c"
+  dependencies:
+    component-emitter "1.2.1"
+    component-inherit "0.0.3"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.3"
+    parseqs "0.0.5"
+    parseuri "0.0.5"
+    ws "1.1.1"
+    xmlhttprequest-ssl "1.5.3"
+    yeast "0.1.2"
+
+engine.io-parser@1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.1.tgz#9554f1ae33107d6fbd170ca5466d2f833f6a07cf"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.5"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    wtf-8 "1.0.0"
+
+engine.io@1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.0.tgz#3eeb5f264cb75dbbec1baaea26d61f5a4eace2aa"
+  dependencies:
+    accepts "1.3.3"
+    base64id "0.1.0"
+    cookie "0.3.1"
+    debug "2.3.3"
+    engine.io-parser "1.3.1"
+    ws "1.1.1"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+errno@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d"
+  dependencies:
+    prr "~0.0.0"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esutils@^2.0.0, esutils@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exists-sync@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.4.tgz#9744c2c428cc03b01060db454d4b12f0ef3c8879"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+fast-sourcemap-concat@^1.0.1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-1.1.0.tgz#a800767abed5eda02e67238ec063a709be61f9d4"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    fs-extra "^0.30.0"
+    memory-streams "^0.1.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+filename-regex@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.7.0:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.7.1.tgz#ccf20f7941f108883fcddb99383dbe6e1861c758"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash.debounce "^3.1.1"
+    lodash.flatten "^3.0.2"
+    minimatch "^3.0.2"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc3:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+form-data@~2.1.1:
+  version "2.1.4"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1"
+  dependencies:
+    asynckit "^0.4.0"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.12"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.26.2:
+  version "0.26.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.26.2.tgz#71b7697e539db037acf41e6e7923e94d605bf498"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2, fs-tree-diff@^0.5.3, fs-tree-diff@^0.5.4, fs-tree-diff@^0.5.6:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@~1.0.4:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.0, gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+gauge@~2.7.1:
+  version "2.7.4"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
+  dependencies:
+    aproba "^1.0.3"
+    console-control-strings "^1.0.0"
+    has-unicode "^2.0.0"
+    object-assign "^4.1.0"
+    signal-exit "^3.0.0"
+    string-width "^1.0.1"
+    strip-ansi "^3.0.1"
+    wide-align "^1.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-caller-file@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4, git-repo-info@^1.1.2:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@5.0.13, glob@^5.0.10:
+  version "5.0.13"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.13.tgz#0b6ffc3ac64eb90669f723a00a0ebb7281b33f8f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.4, glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+globals@^9.0.0:
+  version "9.17.0"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-9.17.0.tgz#0c0ca696d9b9bb694d2e5470bd37777caad50286"
+
+graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growly@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
+
+handlebars@^4.0.4:
+  version "4.0.6"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.6.tgz#2ce4484850537f9c97a8026d5399b935c4ed4ed7"
+  dependencies:
+    async "^1.4.0"
+    optimist "^0.6.1"
+    source-map "^0.4.4"
+  optionalDependencies:
+    uglify-js "^2.6"
+
+har-schema@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e"
+
+har-validator@~2.0.2:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+har-validator@~4.2.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a"
+  dependencies:
+    ajv "^4.9.1"
+    har-schema "^1.0.5"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+heimdalljs@^0.3.0:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.3.3.tgz#e92d2c6f77fd46d5bf50b610d28ad31755054d0b"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.0.tgz#4b9f1e40800c3e50c6c27f781676afcce71f3985"
+  dependencies:
+    os-tmpdir "^1.0.1"
+    user-home "^1.1.1"
+
+home-or-tmp@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.1"
+
+hosted-git-info@^2.1.4, hosted-git-info@^2.1.5, hosted-git-info@~2.1.4:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.1.5.tgz#0ba81d90da2e25ab34a332e6ec77936e1598118b"
+
+htmlparser2@3.8.x:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068"
+  dependencies:
+    domelementtype "1"
+    domhandler "2.3"
+    domutils "1.5"
+    entities "1.0"
+    readable-stream "1.1"
+
+http-errors@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
+  dependencies:
+    inherits "~2.0.1"
+    statuses "1"
+
+http-errors@~1.6.1:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257"
+  dependencies:
+    depd "1.1.0"
+    inherits "2.0.3"
+    setprototypeof "1.0.3"
+    statuses ">= 1.3.1 < 2"
+
+http-proxy@^1.13.1, http-proxy@^1.9.0:
+  version "1.16.2"
+  resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742"
+  dependencies:
+    eventemitter3 "1.x.x"
+    requires-port "1.x.x"
+
+http-signature@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-0.11.0.tgz#1796cf67a001ad5cd6849dca0991485f09089fe6"
+  dependencies:
+    asn1 "0.1.11"
+    assert-plus "^0.1.5"
+    ctype "0.5.3"
+
+http-signature@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf"
+  dependencies:
+    assert-plus "^0.2.0"
+    jsprim "^1.2.2"
+    sshpk "^1.7.0"
+
+iconv-lite@0.4.13, iconv-lite@^0.4.5:
+  version "0.4.13"
+  resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+
+iferr@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
+
+image-size@~0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.1.tgz#28eea8548a4b1443480ddddc1e083ae54652439f"
+
+imurmurhash@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+
+include-path-searcher@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/include-path-searcher/-/include-path-searcher-0.1.0.tgz#c0cf2ddfa164fb2eae07bc7ca43a7f191cb4d7bd"
+
+indexof@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d"
+
+inflection@^1.7.0, inflection@^1.8.0:
+  version "1.12.0"
+  resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416"
+
+inflight@^1.0.4, inflight@~1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+  dependencies:
+    once "^1.3.0"
+    wrappy "1"
+
+inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
+
+ini@^1.3.4, ini@~1.3.4:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.4.tgz#0537cb79daf59b59a1a517dff706c86ec039162e"
+
+init-package-json@~1.9.1:
+  version "1.9.6"
+  resolved "https://registry.yarnpkg.com/init-package-json/-/init-package-json-1.9.6.tgz#789fc2b74466a4952b9ea77c0575bc78ebd60a61"
+  dependencies:
+    glob "^7.1.1"
+    npm-package-arg "^4.0.0 || ^5.0.0"
+    promzard "^0.3.0"
+    read "~1.0.1"
+    read-package-json "1 || 2"
+    semver "2.x || 3.x || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+    validate-npm-package-name "^3.0.0"
+
+inline-source-map-comment@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/inline-source-map-comment/-/inline-source-map-comment-1.0.5.tgz#50a8a44c2a790dfac441b5c94eccd5462635faf6"
+  dependencies:
+    chalk "^1.0.0"
+    get-stdin "^4.0.1"
+    minimist "^1.1.1"
+    sum-up "^1.0.1"
+    xtend "^4.0.0"
+
+inquirer@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-0.5.1.tgz#e9f2cd1ee172c7a32e054b78a03d4ddb0d7707f1"
+  dependencies:
+    async "~0.8.0"
+    chalk "~0.4.0"
+    cli-color "~0.3.2"
+    lodash "~2.4.1"
+    mute-stream "0.0.4"
+    readline2 "~0.1.0"
+    through "~2.3.4"
+
+invariant@^2.2.0, invariant@^2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360"
+  dependencies:
+    loose-envify "^1.0.0"
+
+invert-kv@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6"
+
+ipaddr.js@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec"
+
+is-buffer@^1.1.5:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.5.tgz#1f3b26ef613b214b88cbca23cc6c01d87961eecc"
+
+is-builtin-module@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe"
+  dependencies:
+    builtin-modules "^1.0.0"
+
+is-dotfile@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.2.tgz#2c132383f39199f8edc268ca01b9b007d205cc4d"
+
+is-equal-shallow@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534"
+  dependencies:
+    is-primitive "^2.0.0"
+
+is-extendable@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89"
+
+is-extglob@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0"
+
+is-finite@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-fullwidth-code-point@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
+  dependencies:
+    number-is-nan "^1.0.0"
+
+is-git-url@^0.2.0:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/is-git-url/-/is-git-url-0.2.3.tgz#445200d6fbd6da028fb5e01440d9afc93f3ccb64"
+
+is-glob@^2.0.0, is-glob@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
+  dependencies:
+    is-extglob "^1.0.0"
+
+is-integer@^1.0.4:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/is-integer/-/is-integer-1.0.6.tgz#5273819fada880d123e1ac00a938e7172dd8d95e"
+  dependencies:
+    is-finite "^1.0.0"
+
+is-my-json-valid@^2.12.4:
+  version "2.16.0"
+  resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.0.tgz#f079dd9bfdae65ee2038aae8acbc86ab109e3693"
+  dependencies:
+    generate-function "^2.0.0"
+    generate-object-property "^1.1.0"
+    jsonpointer "^4.0.0"
+    xtend "^4.0.0"
+
+is-number@^2.0.2, is-number@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f"
+  dependencies:
+    kind-of "^3.0.2"
+
+is-posix-bracket@^0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4"
+
+is-primitive@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575"
+
+is-property@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84"
+
+is-type@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/is-type/-/is-type-0.0.1.tgz#f651d85c365d44955d14a51d8d7061f3f6b4779c"
+  dependencies:
+    core-util-is "~1.0.0"
+
+is-typedarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
+
+is-utf8@^0.2.0:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72"
+
+isarray@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf"
+
+isarray@1.0.0, isarray@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
+
+isbinaryfile@^2.0.3:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-2.0.4.tgz#d23592e6a6f093efb84c2e6152056be294e414a1"
+
+isexe@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+
+isobject@^2.0.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89"
+  dependencies:
+    isarray "1.0.0"
+
+isstream@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
+
+istextorbinary@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/istextorbinary/-/istextorbinary-2.1.0.tgz#dbed2a6f51be2f7475b68f89465811141b758874"
+  dependencies:
+    binaryextensions "1 || 2"
+    editions "^1.1.1"
+    textextensions "1 || 2"
+
+ivy-codemirror@2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/ivy-codemirror/-/ivy-codemirror-2.0.3.tgz#a5b26d343be2031dead036e2be794c46f1b157d9"
+  dependencies:
+    ember-cli-babel "^5.1.7"
+
+jju@^1.1.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jju/-/jju-1.3.0.tgz#dadd9ef01924bc728b03f2f7979bdbd62f7a2aaa"
+
+jodid25519@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/jodid25519/-/jodid25519-1.0.2.tgz#06d4912255093419477d425633606e0e90782967"
+  dependencies:
+    jsbn "~0.1.0"
+
+js-tokens@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-1.0.1.tgz#cc435a5c8b94ad15acb7983140fc80182c89aeae"
+
+js-tokens@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.1.tgz#08e9f132484a2c45a30907e9dc4d5567b7f114d7"
+
+js-yaml@^3.2.5, js-yaml@^3.2.7:
+  version "3.8.3"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.8.3.tgz#33a05ec481c850c8875929166fe1beb61c728766"
+  dependencies:
+    argparse "^1.0.7"
+    esprima "^3.1.1"
+
+jsbn@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
+
+jsesc@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b"
+
+jsesc@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d"
+
+jshint@^2.7.0:
+  version "2.9.4"
+  resolved "https://registry.yarnpkg.com/jshint/-/jshint-2.9.4.tgz#5e3ba97848d5290273db514aee47fe24cf592934"
+  dependencies:
+    cli "~1.0.0"
+    console-browserify "1.1.x"
+    exit "0.1.x"
+    htmlparser2 "3.8.x"
+    lodash "3.7.x"
+    minimatch "~3.0.2"
+    shelljs "0.3.x"
+    strip-json-comments "1.0.x"
+
+json-parse-helpfulerror@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/json-parse-helpfulerror/-/json-parse-helpfulerror-1.0.3.tgz#13f14ce02eed4e981297b64eb9e3b932e2dd13dc"
+  dependencies:
+    jju "^1.1.0"
+
+json-schema@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13"
+
+json-stable-stringify@^1.0.0, json-stable-stringify@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af"
+  dependencies:
+    jsonify "~0.0.0"
+
+json-stringify-safe@~5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
+
+json3@3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1"
+
+json5@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.4.0.tgz#054352e4c4c80c86c0923877d449de176a732c8d"
+
+json5@^0.5.0:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821"
+
+jsonfile@^2.0.0, jsonfile@^2.1.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8"
+  optionalDependencies:
+    graceful-fs "^4.1.6"
+
+jsonify@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73"
+
+jsonpointer@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9"
+
+jsprim@^1.2.2:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.0.tgz#a3b87e40298d8c380552d8cc7628a0bb95a22918"
+  dependencies:
+    assert-plus "1.0.0"
+    extsprintf "1.0.2"
+    json-schema "0.2.3"
+    verror "1.3.6"
+
+kind-of@^3.0.2:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.0.tgz#b58abe4d5c044ad33726a8c1525b48cf891bff07"
+  dependencies:
+    is-buffer "^1.1.5"
+
+klassy@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/klassy/-/klassy-0.1.3.tgz#c31d5756d583197d75f582b6e692872be497067f"
+
+klaw@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439"
+  optionalDependencies:
+    graceful-fs "^4.1.9"
+
+lazy-cache@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
+
+lcid@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835"
+  dependencies:
+    invert-kv "^1.0.0"
+
+leek@0.0.21:
+  version "0.0.21"
+  resolved "https://registry.yarnpkg.com/leek/-/leek-0.0.21.tgz#09804bf70f8aefbba745f5d56d2a4debf22711ff"
+  dependencies:
+    debug "^2.1.0"
+    lodash.assign "^3.2.0"
+    request "^2.27.0"
+    rsvp "^3.0.21"
+
+less@^2.5.0:
+  version "2.7.2"
+  resolved "https://registry.yarnpkg.com/less/-/less-2.7.2.tgz#368d6cc73e1fb03981183280918743c5dcf9b3df"
+  optionalDependencies:
+    errno "^0.1.1"
+    graceful-fs "^4.1.2"
+    image-size "~0.5.0"
+    mime "^1.2.11"
+    mkdirp "^0.5.0"
+    promise "^7.1.1"
+    request "^2.72.0"
+    source-map "^0.5.3"
+
+leven@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/leven/-/leven-1.0.2.tgz#9144b6eebca5f1d0680169f1a6770dcea60b75c3"
+
+linkify-it@~1.2.0:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-1.2.4.tgz#0773526c317c8fd13bd534ee1d180ff88abf881a"
+  dependencies:
+    uc.micro "^1.0.1"
+
+livereload-js@^2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.2.2.tgz#6c87257e648ab475bc24ea257457edcc1f8d0bc2"
+
+loader.js@^4.0.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/loader.js/-/loader.js-4.3.0.tgz#736c13eb8afdf75abd6c2d7b4f7fd40e1105a71f"
+
+lockfile@~1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/lockfile/-/lockfile-1.0.3.tgz#2638fc39a0331e9cac1a04b71799931c9c50df79"
+
+lodash-node@^2.4.1:
+  version "2.4.1"
+  resolved "https://registry.yarnpkg.com/lodash-node/-/lodash-node-2.4.1.tgz#ea82f7b100c733d1a42af76801e506105e2a80ec"
+
+lodash._arraycopy@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arraycopy/-/lodash._arraycopy-3.0.0.tgz#76e7b7c1f1fb92547374878a562ed06a3e50f6e1"
+
+lodash._arrayeach@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash._arrayeach/-/lodash._arrayeach-3.0.0.tgz#bab156b2a90d3f1bbd5c653403349e5e5933ef9e"
+
+lodash._baseassign@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz#8c38a099500f215ad09e59f1722fd0c52bfe0a4e"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash._basecopy@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36"
+
+lodash._baseflatten@^3.0.0:
+  version "3.1.4"
+  resolved "https://registry.yarnpkg.com/lodash._baseflatten/-/lodash._baseflatten-3.1.4.tgz#0770ff80131af6e34f3b511796a7ba5214e65ff7"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash._basefor@^3.0.0:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/lodash._basefor/-/lodash._basefor-3.0.3.tgz#7550b4e9218ef09fad24343b612021c79b4c20c2"
+
+lodash._bindcallback@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"
+
+lodash._createassigner@^3.0.0:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash._createassigner/-/lodash._createassigner-3.1.1.tgz#838a5bae2fdaca63ac22dee8e19fa4e6d6970b11"
+  dependencies:
+    lodash._bindcallback "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash._getnative@^3.0.0:
+  version "3.9.1"
+  resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
+
+lodash._isiterateecall@^3.0.0:
+  version "3.0.9"
+  resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c"
+
+lodash.assign@^3.0.0, lodash.assign@^3.2.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-3.2.0.tgz#3ce9f0234b4b2223e296b8fa0ac1fee8ebca64fa"
+  dependencies:
+    lodash._baseassign "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash.keys "^3.0.0"
+
+lodash.assignin@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2"
+
+lodash.clonedeep@^4.4.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
+
+lodash.debounce@^3.1.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-3.1.1.tgz#812211c378a94cc29d5aa4e3346cf0bfce3a7df5"
+  dependencies:
+    lodash._getnative "^3.0.0"
+
+lodash.defaults@^3.1.2:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-3.1.2.tgz#c7308b18dbf8bc9372d701a73493c61192bd2e2c"
+  dependencies:
+    lodash.assign "^3.0.0"
+    lodash.restparam "^3.0.0"
+
+lodash.find@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.find/-/lodash.find-4.6.0.tgz#cb0704d47ab71789ffa0de8b97dd926fb88b13b1"
+
+lodash.flatten@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-3.0.2.tgz#de1cf57758f8f4479319d35c3e9cc60c4501938c"
+  dependencies:
+    lodash._baseflatten "^3.0.0"
+    lodash._isiterateecall "^3.0.0"
+
+lodash.isarguments@^3.0.0:
+  version "3.1.0"
+  resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a"
+
+lodash.isarray@^3.0.0:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55"
+
+lodash.isplainobject@^3.0.0:
+  version "3.2.0"
+  resolved "https://registry.yarnpkg.com/lodash.isplainobject/-/lodash.isplainobject-3.2.0.tgz#9a8238ae16b200432960cd7346512d0123fbf4c5"
+  dependencies:
+    lodash._basefor "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.istypedarray@^3.0.0:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/lodash.istypedarray/-/lodash.istypedarray-3.0.6.tgz#c9a477498607501d8e8494d283b87c39281cef62"
+
+lodash.keys@^3.0.0:
+  version "3.1.2"
+  resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a"
+  dependencies:
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.keysin@^3.0.0:
+  version "3.0.8"
+  resolved "https://registry.yarnpkg.com/lodash.keysin/-/lodash.keysin-3.0.8.tgz#22c4493ebbedb1427962a54b445b2c8a767fb47f"
+  dependencies:
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+
+lodash.merge@^3.0.2, lodash.merge@^3.3.2:
+  version "3.3.2"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-3.3.2.tgz#0d90d93ed637b1878437bb3e21601260d7afe994"
+  dependencies:
+    lodash._arraycopy "^3.0.0"
+    lodash._arrayeach "^3.0.0"
+    lodash._createassigner "^3.0.0"
+    lodash._getnative "^3.0.0"
+    lodash.isarguments "^3.0.0"
+    lodash.isarray "^3.0.0"
+    lodash.isplainobject "^3.0.0"
+    lodash.istypedarray "^3.0.0"
+    lodash.keys "^3.0.0"
+    lodash.keysin "^3.0.0"
+    lodash.toplainobject "^3.0.0"
+
+lodash.merge@^4.3.0, lodash.merge@^4.5.1:
+  version "4.6.0"
+  resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.0.tgz#69884ba144ac33fe699737a6086deffadd0f89c5"
+
+lodash.omit@^4.1.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60"
+
+lodash.pad@^4.1.0:
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70"
+
+lodash.padend@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e"
+
+lodash.padstart@^4.1.0:
+  version "4.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b"
+
+lodash.restparam@^3.0.0:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805"
+
+lodash.toplainobject@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/lodash.toplainobject/-/lodash.toplainobject-3.0.0.tgz#28790ad942d293d78aa663a07ecf7f52ca04198d"
+  dependencies:
+    lodash._basecopy "^3.0.0"
+    lodash.keysin "^3.0.0"
+
+lodash.uniq@^4.2.0:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
+
+lodash.uniqby@^4.7.0:
+  version "4.7.0"
+  resolved "https://registry.yarnpkg.com/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz#d99c07a669e9e6d24e1362dfe266c67616af1302"
+
+lodash@3.7.x:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.7.0.tgz#3678bd8ab995057c07ade836ed2ef087da811d45"
+
+lodash@^3.10.0, lodash@^3.9.3:
+  version "3.10.1"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6"
+
+lodash@^4.0.0, lodash@^4.14.0, lodash@^4.2.0:
+  version "4.17.4"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae"
+
+lodash@~2.4.1:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/lodash/-/lodash-2.4.2.tgz#fadd834b9683073da179b3eae6d9c0d15053f73e"
+
+longest@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097"
+
+loose-envify@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848"
+  dependencies:
+    js-tokens "^3.0.0"
+
+lru-cache@2, lru-cache@~2.7.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"
+
+lru-cache@^4.0.1:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.0.2.tgz#1d17679c069cda5d040991a09dbc2c0db377e55e"
+  dependencies:
+    pseudomap "^1.0.1"
+    yallist "^2.0.0"
+
+lru-queue@0.1:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/lru-queue/-/lru-queue-0.1.0.tgz#2738bd9f0d3cf4f84490c5736c48699ac632cda3"
+  dependencies:
+    es5-ext "~0.10.2"
+
+make-array@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/make-array/-/make-array-0.1.2.tgz#335e36ebb0c5a43154d21213a1ecaeae2a1bb3ef"
+
+makeerror@1.0.x:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c"
+  dependencies:
+    tmpl "1.0.x"
+
+markdown-it-terminal@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/markdown-it-terminal/-/markdown-it-terminal-0.0.3.tgz#c77a8533c2170b46d2a907a3c3452d4d7f4aa5db"
+  dependencies:
+    ansi-styles "^2.1.0"
+    cardinal "^0.5.0"
+    cli-table "^0.3.1"
+    lodash.merge "^3.3.2"
+    markdown-it "^4.4.0"
+
+markdown-it@4.3.0:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.3.0.tgz#0ee2b0724079d186b3f04b7345ce395ae47cc474"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+markdown-it@^4.4.0:
+  version "4.4.0"
+  resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-4.4.0.tgz#3df373dbea587a9a7fef3e56311b68908f75c414"
+  dependencies:
+    argparse "~1.0.2"
+    entities "~1.1.1"
+    linkify-it "~1.2.0"
+    mdurl "~1.0.0"
+    uc.micro "^1.0.0"
+
+matcher-collection@^1.0.0, matcher-collection@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/matcher-collection/-/matcher-collection-1.0.4.tgz#2f66ae0869996f29e43d0b62c83dd1d43e581755"
+  dependencies:
+    minimatch "^3.0.2"
+
+md5-hex@^1.0.2:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-1.3.0.tgz#d2c4afe983c4370662179b8cad145219135046c4"
+  dependencies:
+    md5-o-matic "^0.1.1"
+
+md5-o-matic@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3"
+
+mdurl@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
+
+media-typer@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
+
+memoizee@~0.3.8:
+  version "0.3.10"
+  resolved "https://registry.yarnpkg.com/memoizee/-/memoizee-0.3.10.tgz#4eca0d8aed39ec9d017f4c5c2f2f6432f42e5c8f"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.11"
+    es6-weak-map "~0.1.4"
+    event-emitter "~0.3.4"
+    lru-queue "0.1"
+    next-tick "~0.2.2"
+    timers-ext "0.1"
+
+memory-streams@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/memory-streams/-/memory-streams-0.1.2.tgz#273ff777ab60fec599b116355255282cca2c50c2"
+  dependencies:
+    readable-stream "~1.0.2"
+
+merge-defaults@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/merge-defaults/-/merge-defaults-0.2.1.tgz#dd42248eb96bb6a51521724321c72ff9583dde80"
+  dependencies:
+    lodash "~2.4.1"
+
+merge-descriptors@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
+
+merge@^1.1.3, merge@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.0.tgz#7531e39d4949c281a66b8c5a6e0265e8b05894da"
+
+methods@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
+
+micromatch@^2.1.5:
+  version "2.3.11"
+  resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565"
+  dependencies:
+    arr-diff "^2.0.0"
+    array-unique "^0.2.1"
+    braces "^1.8.2"
+    expand-brackets "^0.1.4"
+    extglob "^0.3.1"
+    filename-regex "^2.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.1"
+    kind-of "^3.0.2"
+    normalize-path "^2.0.1"
+    object.omit "^2.0.0"
+    parse-glob "^3.0.4"
+    regex-cache "^0.4.2"
+
+"mime-db@>= 1.27.0 < 2", mime-db@~1.27.0:
+  version "1.27.0"
+  resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1"
+
+mime-types@^2.1.11, mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.7:
+  version "2.1.15"
+  resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed"
+  dependencies:
+    mime-db "~1.27.0"
+
+mime@1.3.4, mime@^1.2.11:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53"
+
+minimatch@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-1.0.0.tgz#e0dd2120b49e1b724ce8d714c520822a9438576d"
+  dependencies:
+    lru-cache "2"
+    sigmund "~1.0.0"
+
+"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@~3.0.0, minimatch@~3.0.2:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.3.tgz#2a4e4090b96b2db06a9d7df01055a62a77c9b774"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimatch@^2.0.1, minimatch@^2.0.3:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7"
+  dependencies:
+    brace-expansion "^1.0.0"
+
+minimist@0.0.8, minimist@~0.0.1:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d"
+
+minimist@^1.1.0, minimist@^1.1.1:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
+
+mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
+  dependencies:
+    minimist "0.0.8"
+
+mkdirp@~0.4.0:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
+  dependencies:
+    minimist "0.0.8"
+
+mktemp@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mktemp/-/mktemp-0.3.5.tgz#a1504c706d0d2b198c6a0eb645f7fdaf8181f7de"
+
+moment-timezone@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.3.1.tgz#3ef47856b02d53b718a10a5ec2023aa299e07bf5"
+  dependencies:
+    moment ">= 2.6.0"
+
+moment-timezone@^0.5.0:
+  version "0.5.13"
+  resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.13.tgz#99ce5c7d827262eb0f1f702044177f60745d7b90"
+  dependencies:
+    moment ">= 2.9.0"
+
+"moment@>= 2.6.0", "moment@>= 2.9.0", moment@^2.13.0:
+  version "2.18.1"
+  resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
+
+morgan@^1.5.2:
+  version "1.8.1"
+  resolved "https://registry.yarnpkg.com/morgan/-/morgan-1.8.1.tgz#f93023d3887bd27b78dfd6023cea7892ee27a4b1"
+  dependencies:
+    basic-auth "~1.1.0"
+    debug "2.6.1"
+    depd "~1.1.0"
+    on-finished "~2.3.0"
+    on-headers "~1.0.1"
+
+mout@~0.9.0:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/mout/-/mout-0.9.1.tgz#84f0f3fd6acc7317f63de2affdcc0cee009b0477"
+
+ms@0.7.1:
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098"
+
+ms@0.7.2:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765"
+
+ms@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.3.tgz#708155a5e44e33f5fd0fc53e81d0d40a91be1fff"
+
+mustache@^2.2.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/mustache/-/mustache-2.3.0.tgz#4028f7778b17708a489930a6e52ac3bca0da41d0"
+
+mute-stream@0.0.4, mute-stream@~0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.4.tgz#a9219960a6d5d5d046597aee51252c6655f7177e"
+
+natives@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.0.tgz#e9ff841418a6b2ec7a495e939984f78f163e6e31"
+
+negotiator@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9"
+
+next-tick@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c"
+
+next-tick@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-0.2.2.tgz#75da4a927ee5887e39065880065b7336413b310d"
+
+node-gyp@~3.0.3:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.0.3.tgz#9b004219f4fa9efbfd78c5fc674aa12e58fb8694"
+  dependencies:
+    fstream "^1.0.0"
+    glob "3 || 4"
+    graceful-fs "^4.1.2"
+    minimatch "1"
+    mkdirp "^0.5.0"
+    nopt "2 || 3"
+    npmlog "0 || 1"
+    osenv "0"
+    path-array "^1.0.0"
+    request "2"
+    rimraf "2"
+    semver "2.x || 3.x || 4 || 5"
+    tar "^1.0.0"
+    which "1"
+
+node-int64@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
+
+node-modules-path@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8"
+
+node-notifier@^5.0.1:
+  version "5.1.2"
+  resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.1.2.tgz#2fa9e12605fa10009d44549d6fcd8a63dde0e4ff"
+  dependencies:
+    growly "^1.3.0"
+    semver "^5.3.0"
+    shellwords "^0.1.0"
+    which "^1.2.12"
+
+node-uuid@^1.4.3, node-uuid@~1.4.3:
+  version "1.4.8"
+  resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907"
+
+"nopt@2 || 3", nopt@^3.0.1, nopt@^3.0.3, nopt@~3.0.4:
+  version "3.0.6"
+  resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9"
+  dependencies:
+    abbrev "1"
+
+normalize-git-url@~3.0.1:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/normalize-git-url/-/normalize-git-url-3.0.2.tgz#8e5f14be0bdaedb73e07200310aa416c27350fc4"
+
+normalize-package-data@^2.0.0, "normalize-package-data@~1.0.1 || ^2.0.0", normalize-package-data@~2.3.5:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.3.8.tgz#d819eda2a9dedbd1ffa563ea4071d936782295bb"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    is-builtin-module "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    validate-npm-package-license "^3.0.1"
+
+normalize-path@^2.0.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9"
+  dependencies:
+    remove-trailing-separator "^1.0.1"
+
+npm-cache-filename@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/npm-cache-filename/-/npm-cache-filename-1.0.2.tgz#ded306c5b0bfc870a9e9faf823bc5f283e05ae11"
+
+npm-git-info@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/npm-git-info/-/npm-git-info-1.0.3.tgz#a933c42ec321e80d3646e0d6e844afe94630e1d5"
+
+npm-install-checks@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-1.0.7.tgz#6d91aeda0ac96801f1ed7aadee116a6c0a086a57"
+  dependencies:
+    npmlog "0.1 || 1 || 2"
+    semver "^2.3.0 || 3.x || 4 || 5"
+
+"npm-package-arg@^3.0.0 || ^4.0.0", "npm-package-arg@^4.0.0 || ^5.0.0", npm-package-arg@~4.0.2:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.0.2.tgz#3f28235f9f6428e54bfeca73629e27d6c81a7e82"
+  dependencies:
+    hosted-git-info "^2.1.4"
+    semver "4 || 5"
+
+npm-package-arg@^4.1.1:
+  version "4.2.1"
+  resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-4.2.1.tgz#593303fdea85f7c422775f17f9eb7670f680e3ec"
+  dependencies:
+    hosted-git-info "^2.1.5"
+    semver "^5.1.0"
+
+npm-registry-client@~7.0.7:
+  version "7.0.9"
+  resolved "https://registry.yarnpkg.com/npm-registry-client/-/npm-registry-client-7.0.9.tgz#1baf86ee5285c4e6d38d4556208ded56049231bb"
+  dependencies:
+    chownr "^1.0.1"
+    concat-stream "^1.4.6"
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    normalize-package-data "~1.0.1 || ^2.0.0"
+    npm-package-arg "^3.0.0 || ^4.0.0"
+    once "^1.3.0"
+    request "^2.47.0"
+    retry "^0.8.0"
+    rimraf "2"
+    semver "2 >=2.2.1 || 3.x || 4 || 5"
+    slide "^1.1.3"
+  optionalDependencies:
+    npmlog "~2.0.0"
+
+npm-user-validate@~0.1.2:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/npm-user-validate/-/npm-user-validate-0.1.5.tgz#52465d50c2d20294a57125b996baedbf56c5004b"
+
+npm@2.14.10:
+  version "2.14.10"
+  resolved "https://registry.yarnpkg.com/npm/-/npm-2.14.10.tgz#96597ee1e5efeebdcf5f91b57763fe9ae17b9903"
+  dependencies:
+    abbrev "~1.0.7"
+    ansi "~0.3.0"
+    ansicolors "~0.3.2"
+    ansistyles "~0.1.3"
+    archy "~1.0.0"
+    async-some "~1.0.2"
+    block-stream "0.0.8"
+    char-spinner "~1.0.1"
+    chmodr "~1.0.2"
+    chownr "~1.0.1"
+    cmd-shim "~2.0.1"
+    columnify "~1.5.2"
+    config-chain "~1.1.9"
+    dezalgo "~1.0.3"
+    editor "~1.0.0"
+    fs-vacuum "~1.2.7"
+    fs-write-stream-atomic "~1.0.4"
+    fstream "~1.0.8"
+    fstream-npm "~1.0.7"
+    github-url-from-git "~1.4.0"
+    github-url-from-username-repo "~1.0.2"
+    glob "~5.0.15"
+    graceful-fs "~4.1.2"
+    hosted-git-info "~2.1.4"
+    inflight "~1.0.4"
+    inherits "~2.0.1"
+    ini "~1.3.4"
+    init-package-json "~1.9.1"
+    lockfile "~1.0.1"
+    lru-cache "~2.7.0"
+    minimatch "~3.0.0"
+    mkdirp "~0.5.1"
+    node-gyp "~3.0.3"
+    nopt "~3.0.4"
+    normalize-git-url "~3.0.1"
+    normalize-package-data "~2.3.5"
+    npm-cache-filename "~1.0.2"
+    npm-install-checks "~1.0.6"
+    npm-package-arg "~4.0.2"
+    npm-registry-client "~7.0.7"
+    npm-user-validate "~0.1.2"
+    npmlog "~2.0.0"
+    once "~1.3.2"
+    opener "~1.4.1"
+    osenv "~0.1.3"
+    path-is-inside "~1.0.0"
+    read "~1.0.7"
+    read-installed "~4.0.3"
+    read-package-json "~2.0.2"
+    readable-stream "~1.1.13"
+    realize-package-specifier "~3.0.1"
+    request "~2.65.0"
+    retry "~0.8.0"
+    rimraf "~2.4.3"
+    semver "~5.0.3"
+    sha "~2.0.1"
+    slide "~1.1.6"
+    sorted-object "~1.0.0"
+    spdx "~0.4.1"
+    tar "~2.2.1"
+    text-table "~0.2.0"
+    uid-number "0.0.6"
+    umask "~1.1.0"
+    validate-npm-package-license "~3.0.1"
+    validate-npm-package-name "~2.2.2"
+    which "~1.2.0"
+    wrappy "~1.0.1"
+    write-file-atomic "~1.1.3"
+
+"npmlog@0 || 1":
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-1.2.1.tgz#28e7be619609b53f7ad1dd300a10d64d716268b6"
+  dependencies:
+    ansi "~0.3.0"
+    are-we-there-yet "~1.0.0"
+    gauge "~1.2.0"
+
+"npmlog@0.1 || 1 || 2", npmlog@~2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-2.0.4.tgz#98b52530f2514ca90d09ec5b22c8846722375692"
+  dependencies:
+    ansi "~0.3.1"
+    are-we-there-yet "~1.1.2"
+    gauge "~1.2.5"
+
+npmlog@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.0.2.tgz#d03950e0e78ce1527ba26d2a7592e9348ac3e75f"
+  dependencies:
+    are-we-there-yet "~1.1.2"
+    console-control-strings "~1.1.0"
+    gauge "~2.7.1"
+    set-blocking "~2.0.0"
+
+number-is-nan@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
+
+oauth-sign@~0.8.0, oauth-sign@~0.8.1:
+  version "0.8.2"
+  resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43"
+
+object-assign@4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0"
+
+object-assign@^4.0.1, object-assign@^4.1.0:
+  version "4.1.1"
+  resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+
+object-component@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291"
+
+object.omit@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa"
+  dependencies:
+    for-own "^0.1.4"
+    is-extendable "^0.1.1"
+
+on-finished@~2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
+  dependencies:
+    ee-first "1.1.1"
+
+on-headers@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7"
+
+once@^1.3.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+  dependencies:
+    wrappy "1"
+
+once@~1.3.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20"
+  dependencies:
+    wrappy "1"
+
+opener@~1.4.1:
+  version "1.4.3"
+  resolved "https://registry.yarnpkg.com/opener/-/opener-1.4.3.tgz#5c6da2c5d7e5831e8ffa3964950f8d6674ac90b8"
+
+optimist@^0.6.1, optimist@~0.6.0:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686"
+  dependencies:
+    minimist "~0.0.1"
+    wordwrap "~0.0.2"
+
+options@>=0.0.5:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f"
+
+os-homedir@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
+
+os-locale@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9"
+  dependencies:
+    lcid "^1.0.0"
+
+os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
+
+osenv@0, osenv@^0.1.0, osenv@~0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644"
+  dependencies:
+    os-homedir "^1.0.0"
+    os-tmpdir "^1.0.0"
+
+osenv@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.0.3.tgz#cd6ad8ddb290915ad9e22765576025d411f29cb6"
+
+output-file-sync@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-1.1.2.tgz#d0a33eefe61a205facb90092e826598d5245ce76"
+  dependencies:
+    graceful-fs "^4.1.4"
+    mkdirp "^0.5.1"
+    object-assign "^4.1.0"
+
+parse-glob@^3.0.4:
+  version "3.0.4"
+  resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c"
+  dependencies:
+    glob-base "^0.3.0"
+    is-dotfile "^1.0.0"
+    is-extglob "^1.0.0"
+    is-glob "^2.0.0"
+
+parsejson@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseqs@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseuri@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a"
+  dependencies:
+    better-assert "~1.0.0"
+
+parseurl@~1.3.0, parseurl@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56"
+
+path-array@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-array/-/path-array-1.0.1.tgz#7e2f0f35f07a2015122b868b7eac0eb2c4fec271"
+  dependencies:
+    array-index "^1.0.0"
+
+path-exists@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-1.0.0.tgz#d5a8998eb71ef37a74c34eb0d9eba6e878eea081"
+
+path-is-absolute@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+
+path-is-inside@^1.0.1, path-is-inside@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
+
+path-parse@^1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1"
+
+path-posix@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/path-posix/-/path-posix-1.0.0.tgz#06b26113f56beab042545a23bfa88003ccac260f"
+
+path-to-regexp@0.1.7:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
+
+performance-now@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5"
+
+pinkie-promise@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa"
+  dependencies:
+    pinkie "^2.0.0"
+
+pinkie@^2.0.0:
+  version "2.0.4"
+  resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870"
+
+pleasant-progress@^1.0.2:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/pleasant-progress/-/pleasant-progress-1.1.0.tgz#c99cd730a2e50cffdd3badff845fc4d5282e266b"
+
+portfinder@^0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-0.4.0.tgz#a3ffadffafe4fb98e0601a85eda27c27ce84ca1e"
+  dependencies:
+    async "0.9.0"
+    mkdirp "0.5.x"
+
+preserve@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b"
+
+printf@^0.2.3:
+  version "0.2.5"
+  resolved "https://registry.yarnpkg.com/printf/-/printf-0.2.5.tgz#c438ca2ca33e3927671db4ab69c0e52f936a4f0f"
+
+private@^0.1.6, private@~0.1.5:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/private/-/private-0.1.7.tgz#68ce5e8a1ef0a23bb570cc28537b5332aba63ef1"
+
+process-nextick-args@~1.0.6:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3"
+
+process-relative-require@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/process-relative-require/-/process-relative-require-1.0.0.tgz#1590dfcf5b8f2983ba53e398446b68240b4cc68a"
+  dependencies:
+    node-modules-path "^1.0.0"
+
+promise-map-series@^0.2.1:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/promise-map-series/-/promise-map-series-0.2.3.tgz#c2d377afc93253f6bd03dbb77755eb88ab20a847"
+  dependencies:
+    rsvp "^3.0.14"
+
+promise@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/promise/-/promise-7.1.1.tgz#489654c692616b8aa55b0724fa809bb7db49c5bf"
+  dependencies:
+    asap "~2.0.3"
+
+promzard@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/promzard/-/promzard-0.3.0.tgz#26a5d6ee8c7dee4cb12208305acfb93ba382a9ee"
+  dependencies:
+    read "1"
+
+proto-list@~1.2.1:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849"
+
+proxy-addr@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3"
+  dependencies:
+    forwarded "~0.1.0"
+    ipaddr.js "1.3.0"
+
+prr@~0.0.0:
+  version "0.0.0"
+  resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a"
+
+pseudomap@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
+
+punycode@^1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e"
+
+q@^1.1.2:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/q/-/q-1.5.0.tgz#dd01bac9d06d30e6f219aecb8253ee9ebdc308f1"
+
+qs@5.2.0:
+  version "5.2.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.0.tgz#a9f31142af468cb72b25b30136ba2456834916be"
+
+qs@6.4.0, qs@~6.4.0:
+  version "6.4.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233"
+
+qs@~5.1.0:
+  version "5.1.0"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.1.0.tgz#4d932e5c7ea411cca76a312d39a606200fd50cd9"
+
+qs@~5.2.0:
+  version "5.2.1"
+  resolved "https://registry.yarnpkg.com/qs/-/qs-5.2.1.tgz#801fee030e0b9450d6385adc48a4cc55b44aedfc"
+
+quick-temp@0.1.5, quick-temp@^0.1.0, quick-temp@^0.1.2, quick-temp@^0.1.3, quick-temp@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/quick-temp/-/quick-temp-0.1.5.tgz#0d0d67f0fb6a589a0e142f90985f76cdbaf403f7"
+  dependencies:
+    mktemp "~0.3.4"
+    rimraf "~2.2.6"
+    underscore.string "~2.3.3"
+
+qunitjs@^1.20.0:
+  version "1.23.1"
+  resolved "https://registry.yarnpkg.com/qunitjs/-/qunitjs-1.23.1.tgz#1971cf97ac9be01a64d2315508d2e48e6fd4e719"
+
+randomatic@^1.1.3:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.6.tgz#110dcabff397e9dcff7c0789ccc0a49adf1ec5bb"
+  dependencies:
+    is-number "^2.0.2"
+    kind-of "^3.0.2"
+
+range-parser@~1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e"
+
+raw-body@~2.1.5:
+  version "2.1.7"
+  resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.1.7.tgz#adfeace2e4fb3098058014d08c072dcc59758774"
+  dependencies:
+    bytes "2.4.0"
+    iconv-lite "0.4.13"
+    unpipe "1.0.0"
+
+read-installed@~4.0.3:
+  version "4.0.3"
+  resolved "https://registry.yarnpkg.com/read-installed/-/read-installed-4.0.3.tgz#ff9b8b67f187d1e4c29b9feb31f6b223acd19067"
+  dependencies:
+    debuglog "^1.0.1"
+    read-package-json "^2.0.0"
+    readdir-scoped-modules "^1.0.0"
+    semver "2 || 3 || 4 || 5"
+    slide "~1.1.3"
+    util-extend "^1.0.1"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+"read-package-json@1 || 2", read-package-json@^2.0.0, read-package-json@~2.0.2:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/read-package-json/-/read-package-json-2.0.5.tgz#f93a64e641529df68a08c64de46389e8a3f88845"
+  dependencies:
+    glob "^7.1.1"
+    json-parse-helpfulerror "^1.0.2"
+    normalize-package-data "^2.0.0"
+  optionalDependencies:
+    graceful-fs "^4.1.2"
+
+read@1, read@~1.0.1, read@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/read/-/read-1.0.7.tgz#b3da19bd052431a97671d44a42634adf710b40c4"
+  dependencies:
+    mute-stream "~0.0.4"
+
+"readable-stream@1 || 2", readable-stream@1.1, readable-stream@~1.1.13:
+  version "1.1.14"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@^2, "readable-stream@^2.0.0 || ^1.1.13", readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.2.2:
+  version "2.2.9"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.2.9.tgz#cf78ec6f4a6d1eb43d26488cac97f042e74b7fc8"
+  dependencies:
+    buffer-shims "~1.0.0"
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~1.0.0"
+    util-deprecate "~1.0.1"
+
+readable-stream@~1.0.2:
+  version "1.0.34"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "0.0.1"
+    string_decoder "~0.10.x"
+
+readable-stream@~2.0.5:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e"
+  dependencies:
+    core-util-is "~1.0.0"
+    inherits "~2.0.1"
+    isarray "~1.0.0"
+    process-nextick-args "~1.0.6"
+    string_decoder "~0.10.x"
+    util-deprecate "~1.0.1"
+
+readdir-scoped-modules@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz#9fafa37d286be5d92cbaebdee030dc9b5f406747"
+  dependencies:
+    debuglog "^1.0.1"
+    dezalgo "^1.0.0"
+    graceful-fs "^4.1.2"
+    once "^1.3.0"
+
+readline2@0.1.1, readline2@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/readline2/-/readline2-0.1.1.tgz#99443ba6e83b830ef3051bfd7dc241a82728d568"
+  dependencies:
+    mute-stream "0.0.4"
+    strip-ansi "^2.0.1"
+
+realize-package-specifier@~3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/realize-package-specifier/-/realize-package-specifier-3.0.3.tgz#d0def882952b8de3f67eba5e91199661271f41f4"
+  dependencies:
+    dezalgo "^1.0.1"
+    npm-package-arg "^4.1.1"
+
+recast@0.10.33, recast@^0.10.10:
+  version "0.10.33"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.10.33.tgz#942808f7aa016f1fa7142c461d7e5704aaa8d697"
+  dependencies:
+    ast-types "0.8.12"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+recast@^0.11.17, recast@^0.11.3:
+  version "0.11.23"
+  resolved "https://registry.yarnpkg.com/recast/-/recast-0.11.23.tgz#451fd3004ab1e4df9b4e4b66376b2a21912462d3"
+  dependencies:
+    ast-types "0.9.6"
+    esprima "~3.1.0"
+    private "~0.1.5"
+    source-map "~0.5.0"
+
+redeyed@~0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/redeyed/-/redeyed-0.5.0.tgz#7ab000e60ee3875ac115d29edb32c1403c6c25d1"
+  dependencies:
+    esprima-fb "~12001.1.0-dev-harmony-fb"
+
+regenerate@^1.2.1:
+  version "1.3.2"
+  resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.2.tgz#d1941c67bad437e1be76433add5b385f95b19260"
+
+regenerator-runtime@^0.10.0:
+  version "0.10.5"
+  resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658"
+
+regenerator-transform@0.9.11:
+  version "0.9.11"
+  resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.9.11.tgz#3a7d067520cb7b7176769eb5ff868691befe1283"
+  dependencies:
+    babel-runtime "^6.18.0"
+    babel-types "^6.19.0"
+    private "^0.1.6"
+
+regenerator@0.8.40:
+  version "0.8.40"
+  resolved "https://registry.yarnpkg.com/regenerator/-/regenerator-0.8.40.tgz#a0e457c58ebdbae575c9f8cd75127e93756435d8"
+  dependencies:
+    commoner "~0.10.3"
+    defs "~1.1.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    private "~0.1.5"
+    recast "0.10.33"
+    through "~2.3.8"
+
+regex-cache@^0.4.2:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.3.tgz#9b1a6c35d4d0dfcef5711ae651e8e9d3d7114145"
+  dependencies:
+    is-equal-shallow "^0.1.3"
+    is-primitive "^2.0.0"
+
+regexpu-core@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240"
+  dependencies:
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regexpu@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/regexpu/-/regexpu-1.3.0.tgz#e534dc991a9e5846050c98de6d7dd4a55c9ea16d"
+  dependencies:
+    esprima "^2.6.0"
+    recast "^0.10.10"
+    regenerate "^1.2.1"
+    regjsgen "^0.2.0"
+    regjsparser "^0.1.4"
+
+regjsgen@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7"
+
+regjsparser@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c"
+  dependencies:
+    jsesc "~0.5.0"
+
+remove-trailing-separator@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.0.1.tgz#615ebb96af559552d4bf4057c8436d486ab63cc4"
+
+repeat-element@^1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a"
+
+repeat-string@^1.5.2:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
+
+repeating@^1.1.0, repeating@^1.1.2:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-1.1.3.tgz#3d4114218877537494f97f77f9785fab810fa4ac"
+  dependencies:
+    is-finite "^1.0.0"
+
+repeating@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda"
+  dependencies:
+    is-finite "^1.0.0"
+
+request@2, request@^2.27.0, request@^2.47.0, request@^2.72.0:
+  version "2.81.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    aws4 "^1.2.1"
+    caseless "~0.12.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~2.1.1"
+    har-validator "~4.2.1"
+    hawk "~3.1.3"
+    http-signature "~1.1.0"
+    is-typedarray "~1.0.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    oauth-sign "~0.8.1"
+    performance-now "^0.2.0"
+    qs "~6.4.0"
+    safe-buffer "^5.0.1"
+    stringstream "~0.0.4"
+    tough-cookie "~2.3.0"
+    tunnel-agent "^0.6.0"
+    uuid "^3.0.0"
+
+request@~2.65.0:
+  version "2.65.0"
+  resolved "https://registry.yarnpkg.com/request/-/request-2.65.0.tgz#cc1a3bc72b96254734fc34296da322f9486ddeba"
+  dependencies:
+    aws-sign2 "~0.6.0"
+    bl "~1.0.0"
+    caseless "~0.11.0"
+    combined-stream "~1.0.5"
+    extend "~3.0.0"
+    forever-agent "~0.6.1"
+    form-data "~1.0.0-rc3"
+    har-validator "~2.0.2"
+    hawk "~3.1.0"
+    http-signature "~0.11.0"
+    isstream "~0.1.2"
+    json-stringify-safe "~5.0.1"
+    mime-types "~2.1.7"
+    node-uuid "~1.4.3"
+    oauth-sign "~0.8.0"
+    qs "~5.2.0"
+    stringstream "~0.0.4"
+    tough-cookie "~2.2.0"
+    tunnel-agent "~0.4.1"
+
+requires-port@1.x.x:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff"
+
+resolve@^1.1.2, resolve@^1.1.6:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.3.3.tgz#655907c3469a8680dc2de3a275a8fdd69691f0e5"
+  dependencies:
+    path-parse "^1.0.5"
+
+retry@^0.8.0, retry@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/retry/-/retry-0.8.0.tgz#2367628dc0edb247b1eab649dc53ac8628ac2d5f"
+
+right-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef"
+  dependencies:
+    align-text "^0.1.1"
+
+rimraf@2, rimraf@^2.2.8, rimraf@^2.3.4, rimraf@^2.4.3, rimraf@^2.4.4, rimraf@^2.5.2, rimraf@^2.5.3, rimraf@^2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.1.tgz#c2338ec643df7a1b7fe5c54fa86f57428a55f33d"
+  dependencies:
+    glob "^7.0.5"
+
+rimraf@~2.2.6:
+  version "2.2.8"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.2.8.tgz#e439be2aaee327321952730f99a8929e4fc50582"
+
+rimraf@~2.4.3:
+  version "2.4.5"
+  resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da"
+  dependencies:
+    glob "^6.0.1"
+
+rsvp@^3.0.14, rsvp@^3.0.16, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0:
+  version "3.5.0"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.5.0.tgz#a62c573a4ae4e1dfd0697ebc6242e79c681eaa34"
+
+rsvp@~3.0.6:
+  version "3.0.21"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.0.21.tgz#49c588fe18ef293bcd0ab9f4e6756e6ac433359f"
+
+rsvp@~3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.2.1.tgz#07cb4a5df25add9e826ebc67dcc9fd89db27d84a"
+
+safe-buffer@^5.0.1:
+  version "5.0.1"
+  resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.0.1.tgz#d263ca54696cd8a306b5ca6551e92de57918fbe7"
+
+sane@^1.1.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/sane/-/sane-1.6.0.tgz#9610c452307a135d29c1fdfe2547034180c46775"
+  dependencies:
+    anymatch "^1.3.0"
+    exec-sh "^0.2.0"
+    fb-watchman "^1.8.0"
+    minimatch "^3.0.2"
+    minimist "^1.1.1"
+    walker "~1.0.5"
+    watch "~0.10.0"
+
+sanitize-filename@^1.5.3:
+  version "1.6.1"
+  resolved "https://registry.yarnpkg.com/sanitize-filename/-/sanitize-filename-1.6.1.tgz#612da1c96473fa02dccda92dcd5b4ab164a6772a"
+  dependencies:
+    truncate-utf8-bytes "^1.0.0"
+
+"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", "semver@4 || 5", "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.1.0, semver@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f"
+
+semver@^4.1.0, semver@^4.3.1:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
+
+semver@~5.0.3:
+  version "5.0.3"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-5.0.3.tgz#77466de589cd5d3c95f138aa78bc569a3cb5d27a"
+
+send@0.15.1:
+  version "0.15.1"
+  resolved "https://registry.yarnpkg.com/send/-/send-0.15.1.tgz#8a02354c26e6f5cca700065f5f0cdeba90ec7b5f"
+  dependencies:
+    debug "2.6.1"
+    depd "~1.1.0"
+    destroy "~1.0.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    fresh "0.5.0"
+    http-errors "~1.6.1"
+    mime "1.3.4"
+    ms "0.7.2"
+    on-finished "~2.3.0"
+    range-parser "~1.2.0"
+    statuses "~1.3.1"
+
+serve-static@1.12.1:
+  version "1.12.1"
+  resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.1.tgz#7443a965e3ced647aceb5639fa06bf4d1bbe0039"
+  dependencies:
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    parseurl "~1.3.1"
+    send "0.15.1"
+
+set-blocking@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7"
+
+setprototypeof@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04"
+
+sha@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/sha/-/sha-2.0.1.tgz#6030822fbd2c9823949f8f72ed6411ee5cf25aae"
+  dependencies:
+    graceful-fs "^4.1.2"
+    readable-stream "^2.0.2"
+
+shebang-command@^1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea"
+  dependencies:
+    shebang-regex "^1.0.0"
+
+shebang-regex@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3"
+
+shelljs@0.3.x:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.3.0.tgz#3596e6307a781544f591f37da618360f31db57b1"
+
+shellwords@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.0.tgz#66afd47b6a12932d9071cbfd98a52e785cd0ba14"
+
+sigmund@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590"
+
+signal-exit@^3.0.0:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d"
+
+silent-error@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.0.1.tgz#71b7d503d1c6f94882b51b56be879b113cb4822c"
+  dependencies:
+    debug "^2.2.0"
+
+simple-fmt@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/simple-fmt/-/simple-fmt-0.1.0.tgz#191bf566a59e6530482cb25ab53b4a8dc85c3a6b"
+
+simple-is@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/simple-is/-/simple-is-0.2.0.tgz#2abb75aade39deb5cc815ce10e6191164850baf0"
+
+slash@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55"
+
+slide@^1.1.3, slide@^1.1.5, slide@~1.1.3, slide@~1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/slide/-/slide-1.1.6.tgz#56eb027d65b4d2dce6cb2e2d32c4d4afc9e1d707"
+
+sntp@1.x.x:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198"
+  dependencies:
+    hoek "2.x.x"
+
+socket.io-adapter@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b"
+  dependencies:
+    debug "2.3.3"
+    socket.io-parser "2.3.1"
+
+socket.io-client@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.6.0.tgz#5b668f4f771304dfeed179064708386fa6717853"
+  dependencies:
+    backo2 "1.0.2"
+    component-bind "1.0.0"
+    component-emitter "1.2.1"
+    debug "2.3.3"
+    engine.io-client "1.8.0"
+    has-binary "0.1.7"
+    indexof "0.0.1"
+    object-component "0.0.3"
+    parseuri "0.0.5"
+    socket.io-parser "2.3.1"
+    to-array "0.1.4"
+
+socket.io-parser@2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0"
+  dependencies:
+    component-emitter "1.1.2"
+    debug "2.2.0"
+    isarray "0.0.1"
+    json3 "3.3.2"
+
+socket.io@1.6.0:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.6.0.tgz#3e40d932637e6bd923981b25caf7c53e83b6e2e1"
+  dependencies:
+    debug "2.3.3"
+    engine.io "1.8.0"
+    has-binary "0.1.7"
+    object-assign "4.1.0"
+    socket.io-adapter "0.5.0"
+    socket.io-client "1.6.0"
+    socket.io-parser "2.3.1"
+
+sorted-object@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/sorted-object/-/sorted-object-1.0.0.tgz#5d1f4f9c1fb2cd48965967304e212eb44cfb6d05"
+
+source-map-support@^0.2.10:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.2.10.tgz#ea5a3900a1c1cb25096a0ae8cc5c2b4b10ded3dc"
+  dependencies:
+    source-map "0.1.32"
+
+source-map-support@^0.4.2:
+  version "0.4.15"
+  resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.15.tgz#03202df65c06d2bd8c7ec2362a193056fef8d3b1"
+  dependencies:
+    source-map "^0.5.6"
+
+source-map-url@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.3.0.tgz#7ecaf13b57bcd09da8a40c5d269db33799d4aaf9"
+
+source-map@0.1.32:
+  version "0.1.32"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.32.tgz#c8b6c167797ba4740a8ea33252162ff08591b266"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@0.4.x, source-map@^0.4.2, source-map@^0.4.4:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b"
+  dependencies:
+    amdefine ">=0.0.4"
+
+source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.0, source-map@~0.5.1:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
+
+spawn-args@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/spawn-args/-/spawn-args-0.2.0.tgz#fb7d0bd1d70fd4316bd9e3dec389e65f9d6361bb"
+
+spawnback@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/spawnback/-/spawnback-1.0.0.tgz#f73662f7e54d95367eca74d6426c677dd7ea686f"
+
+spdx-correct@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40"
+  dependencies:
+    spdx-license-ids "^1.0.2"
+
+spdx-expression-parse@~1.0.0:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
+
+spdx-license-ids@*:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-2.0.1.tgz#02017bcc3534ee4ffef6d58d20e7d3e9a1c3c8ec"
+
+spdx-license-ids@^1.0.0, spdx-license-ids@^1.0.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
+
+spdx@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/spdx/-/spdx-0.4.3.tgz#ab373c3fcf7b84ffd8fdeb0592d24ff0d14812e4"
+  dependencies:
+    spdx-license-ids "^1.0.0"
+
+sprintf-js@~1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+
+sri-toolbox@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/sri-toolbox/-/sri-toolbox-0.2.0.tgz#a7fea5c3fde55e675cf1c8c06f3ebb5c2935835e"
+
+sshpk@^1.7.0:
+  version "1.13.0"
+  resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.0.tgz#ff2a3e4fd04497555fed97b39a0fd82fafb3a33c"
+  dependencies:
+    asn1 "~0.2.3"
+    assert-plus "^1.0.0"
+    dashdash "^1.12.0"
+    getpass "^0.1.1"
+  optionalDependencies:
+    bcrypt-pbkdf "^1.0.0"
+    ecc-jsbn "~0.1.1"
+    jodid25519 "^1.0.0"
+    jsbn "~0.1.0"
+    tweetnacl "~0.14.0"
+
+stable@~0.1.3:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.6.tgz#910f5d2aed7b520c6e777499c1f32e139fdecb10"
+
+statuses@1, "statuses@>= 1.3.1 < 2", statuses@~1.3.1:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e"
+
+string-width@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
+  dependencies:
+    code-point-at "^1.0.0"
+    is-fullwidth-code-point "^1.0.0"
+    strip-ansi "^3.0.0"
+
+string_decoder@~0.10.x:
+  version "0.10.31"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94"
+
+string_decoder@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.0.tgz#f06f41157b664d86069f84bdbdc9b0d8ab281667"
+  dependencies:
+    buffer-shims "~1.0.0"
+
+stringmap@~0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/stringmap/-/stringmap-0.2.2.tgz#556c137b258f942b8776f5b2ef582aa069d7d1b1"
+
+stringset@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/stringset/-/stringset-0.2.1.tgz#ef259c4e349344377fcd1c913dd2e848c9c042b5"
+
+stringstream@~0.0.4:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
+
+strip-ansi@*, strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+strip-ansi@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
+  dependencies:
+    ansi-regex "^0.2.1"
+
+strip-ansi@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-2.0.1.tgz#df62c1aa94ed2f114e1d0f21fd1d50482b79a60e"
+  dependencies:
+    ansi-regex "^1.0.0"
+
+strip-ansi@~0.1.0:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
+
+strip-bom@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e"
+  dependencies:
+    is-utf8 "^0.2.0"
+
+strip-json-comments@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-1.0.4.tgz#1e15fbcac97d3ee99bf2d73b4c656b082bbafb91"
+
+styled_string@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/styled_string/-/styled_string-0.0.1.tgz#d22782bd81295459bc4f1df18c4bad8e94dd124a"
+
+sum-up@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/sum-up/-/sum-up-1.0.3.tgz#1c661f667057f63bcb7875aa1438bc162525156e"
+  dependencies:
+    chalk "^1.0.0"
+
+supports-color@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
+
+supports-color@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
+
+symlink-or-copy@^1.0.0, symlink-or-copy@^1.0.1, symlink-or-copy@^1.1.8:
+  version "1.1.8"
+  resolved "https://registry.yarnpkg.com/symlink-or-copy/-/symlink-or-copy-1.1.8.tgz#cabe61e0010c1c023c173b25ee5108b37f4b4aa3"
+
+tap-parser@^5.1.0:
+  version "5.3.3"
+  resolved "https://registry.yarnpkg.com/tap-parser/-/tap-parser-5.3.3.tgz#53ec8a90f275d6fff43f169e56a679502a741185"
+  dependencies:
+    events-to-array "^1.0.1"
+    js-yaml "^3.2.7"
+  optionalDependencies:
+    readable-stream "^2"
+
+tar@^1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-1.0.3.tgz#15bcdab244fa4add44e4244a0176edb8aa9a2b44"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+tar@~2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1"
+  dependencies:
+    block-stream "*"
+    fstream "^1.0.2"
+    inherits "2"
+
+temp@0.8.3:
+  version "0.8.3"
+  resolved "https://registry.yarnpkg.com/temp/-/temp-0.8.3.tgz#e0c6bc4d26b903124410e4fed81103014dfc1f59"
+  dependencies:
+    os-tmpdir "^1.0.0"
+    rimraf "~2.2.6"
+
+testem@^1.3.0:
+  version "1.16.0"
+  resolved "https://registry.yarnpkg.com/testem/-/testem-1.16.0.tgz#3933040b5d5b5fbdb6a2b1e7032e511b54a05867"
+  dependencies:
+    backbone "^1.1.2"
+    bluebird "^3.4.6"
+    charm "^1.0.0"
+    commander "^2.6.0"
+    consolidate "^0.14.0"
+    cross-spawn "^5.1.0"
+    express "^4.10.7"
+    fireworm "^0.7.0"
+    glob "^7.0.4"
+    http-proxy "^1.13.1"
+    js-yaml "^3.2.5"
+    lodash.assignin "^4.1.0"
+    lodash.clonedeep "^4.4.1"
+    lodash.find "^4.5.1"
+    lodash.uniqby "^4.7.0"
+    mkdirp "^0.5.1"
+    mustache "^2.2.1"
+    node-notifier "^5.0.1"
+    npmlog "^4.0.0"
+    printf "^0.2.3"
+    rimraf "^2.4.4"
+    socket.io "1.6.0"
+    spawn-args "^0.2.0"
+    styled_string "0.0.1"
+    tap-parser "^5.1.0"
+    xmldom "^0.1.19"
+
+text-table@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+
+"textextensions@1 || 2":
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/textextensions/-/textextensions-2.1.0.tgz#1be0dc2a0dc244d44be8a09af6a85afb93c4dbc3"
+
+through@^2.3.6, through@~2.3.4, through@~2.3.8:
+  version "2.3.8"
+  resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5"
+
+timers-ext@0.1:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/timers-ext/-/timers-ext-0.1.2.tgz#61cc47a76c1abd3195f14527f978d58ae94c5204"
+  dependencies:
+    es5-ext "~0.10.14"
+    next-tick "1"
+
+tiny-lr@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-0.2.1.tgz#b3fdba802e5d56a33c2f6f10794b32e477ac729d"
+  dependencies:
+    body-parser "~1.14.0"
+    debug "~2.2.0"
+    faye-websocket "~0.10.0"
+    livereload-js "^2.2.0"
+    parseurl "~1.3.0"
+    qs "~5.1.0"
+
+tmp@0.0.28:
+  version "0.0.28"
+  resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.28.tgz#172735b7f614ea7af39664fa84cf0de4e515d120"
+  dependencies:
+    os-tmpdir "~1.0.1"
+
+tmpl@1.0.x:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1"
+
+to-array@0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890"
+
+to-fast-properties@^1.0.0, to-fast-properties@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.2.tgz#f3f5c0c3ba7299a7ef99427e44633257ade43320"
+
+tough-cookie@~2.2.0:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.2.2.tgz#c83a1830f4e5ef0b93ef2a3488e724f8de016ac7"
+
+tough-cookie@~2.3.0:
+  version "2.3.2"
+  resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.2.tgz#f081f76e4c85720e6c37a5faced737150d84072a"
+  dependencies:
+    punycode "^1.4.1"
+
+tree-sync@^1.0.0:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/tree-sync/-/tree-sync-1.2.2.tgz#2cf76b8589f59ffedb58db5a3ac7cb013d0158b7"
+  dependencies:
+    debug "^2.2.0"
+    fs-tree-diff "^0.5.6"
+    mkdirp "^0.5.1"
+    quick-temp "^0.1.5"
+    walk-sync "^0.2.7"
+
+trim-right@^1.0.0, trim-right@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003"
+
+truncate-utf8-bytes@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/truncate-utf8-bytes/-/truncate-utf8-bytes-1.0.2.tgz#405923909592d56f78a5818434b0b78489ca5f2b"
+  dependencies:
+    utf8-byte-length "^1.0.1"
+
+try-resolve@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/try-resolve/-/try-resolve-1.0.1.tgz#cfde6fabd72d63e5797cfaab873abbe8e700e912"
+
+tryor@~0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/tryor/-/tryor-0.1.2.tgz#8145e4ca7caff40acde3ccf946e8b8bb75b4172b"
+
+tunnel-agent@^0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
+  dependencies:
+    safe-buffer "^5.0.1"
+
+tunnel-agent@~0.4.1:
+  version "0.4.3"
+  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb"
+
+tweetnacl@^0.14.3, tweetnacl@~0.14.0:
+  version "0.14.5"
+  resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
+
+type-is@~1.6.10, type-is@~1.6.14:
+  version "1.6.15"
+  resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410"
+  dependencies:
+    media-typer "0.3.0"
+    mime-types "~2.1.15"
+
+typedarray@^0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777"
+
+uc.micro@^1.0.0, uc.micro@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.3.tgz#7ed50d5e0f9a9fb0a573379259f2a77458d50192"
+
+uglify-js@^2.6, uglify-js@^2.7.0:
+  version "2.8.22"
+  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.22.tgz#d54934778a8da14903fa29a326fb24c0ab51a1a0"
+  dependencies:
+    source-map "~0.5.1"
+    yargs "~3.10.0"
+  optionalDependencies:
+    uglify-to-browserify "~1.0.0"
+
+uglify-to-browserify@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
+
+uid-number@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81"
+
+ultron@1.0.x:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa"
+
+umask@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/umask/-/umask-1.1.0.tgz#f29cebf01df517912bb58ff9c4e50fde8e33320d"
+
+underscore.string@~2.3.3:
+  version "2.3.3"
+  resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-2.3.3.tgz#71c08bf6b428b1133f37e78fa3a21c82f7329b0d"
+
+underscore@>=1.8.3:
+  version "1.8.3"
+  resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022"
+
+unpipe@1.0.0, unpipe@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
+
+user-home@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190"
+
+utf8-byte-length@^1.0.1:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/utf8-byte-length/-/utf8-byte-length-1.0.4.tgz#f45f150c4c66eee968186505ab93fcbb8ad6bf61"
+
+util-deprecate@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
+
+util-extend@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/util-extend/-/util-extend-1.0.3.tgz#a7c216d267545169637b3b6edc6ca9119e2ff93f"
+
+utils-merge@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8"
+
+uuid@^2.0.1:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a"
+
+uuid@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.0.1.tgz#6544bba2dfda8c1cf17e629a3a305e2bb1fee6c1"
+
+validate-npm-package-license@^3.0.1, validate-npm-package-license@~3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc"
+  dependencies:
+    spdx-correct "~1.0.0"
+    spdx-expression-parse "~1.0.0"
+
+validate-npm-package-name@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz#5fa912d81eb7d0c74afc140de7317f0ca7df437e"
+  dependencies:
+    builtins "^1.0.3"
+
+validate-npm-package-name@~2.2.2:
+  version "2.2.2"
+  resolved "https://registry.yarnpkg.com/validate-npm-package-name/-/validate-npm-package-name-2.2.2.tgz#f65695b22f7324442019a3c7fa39a6e7fd299085"
+  dependencies:
+    builtins "0.0.7"
+
+vary@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37"
+
+verror@1.3.6:
+  version "1.3.6"
+  resolved "https://registry.yarnpkg.com/verror/-/verror-1.3.6.tgz#cff5df12946d297d2baaefaa2689e25be01c005c"
+  dependencies:
+    extsprintf "1.0.2"
+
+walk-sync@^0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.1.3.tgz#8a07261a00bda6cfb1be25e9f100fad57546f583"
+
+walk-sync@^0.2.0, walk-sync@^0.2.5, walk-sync@^0.2.6, walk-sync@^0.2.7:
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.2.7.tgz#b49be4ee6867657aeb736978b56a29d10fa39969"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walk-sync@^0.3.0, walk-sync@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-0.3.1.tgz#558a16aeac8c0db59c028b73c66f397684ece465"
+  dependencies:
+    ensure-posix-path "^1.0.0"
+    matcher-collection "^1.0.0"
+
+walker@~1.0.5:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb"
+  dependencies:
+    makeerror "1.0.x"
+
+watch@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/watch/-/watch-0.10.0.tgz#77798b2da0f9910d595f1ace5b0c2258521f21dc"
+
+wcwidth@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8"
+  dependencies:
+    defaults "^1.0.3"
+
+websocket-driver@>=0.5.1:
+  version "0.6.5"
+  resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.6.5.tgz#5cb2556ceb85f4373c6d8238aa691c8454e13a36"
+  dependencies:
+    websocket-extensions ">=0.1.1"
+
+websocket-extensions@>=0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.1.tgz#76899499c184b6ef754377c2dbb0cd6cb55d29e7"
+
+which@1, which@^1.2.12, which@^1.2.9, which@~1.2.0:
+  version "1.2.14"
+  resolved "https://registry.yarnpkg.com/which/-/which-1.2.14.tgz#9a87c4378f03e827cecaf1acdf56c736c01c14e5"
+  dependencies:
+    isexe "^2.0.0"
+
+wide-align@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.0.tgz#40edde802a71fea1f070da3e62dcda2e7add96ad"
+  dependencies:
+    string-width "^1.0.1"
+
+window-size@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d"
+
+window-size@^0.1.2:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.4.tgz#f8e1aa1ee5a53ec5bf151ffa09742a6ad7697876"
+
+wordwrap@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f"
+
+wordwrap@~0.0.2:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107"
+
+wrappy@1, wrappy@~1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+
+write-file-atomic@^1.1.2:
+  version "1.3.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.3.4.tgz#f807a4f0b1d9e913ae7a48112e6cc3af1991b45f"
+  dependencies:
+    graceful-fs "^4.1.11"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+write-file-atomic@~1.1.3:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-1.1.4.tgz#b1f52dc2e8dc0e3cb04d187a25f758a38a90ca3b"
+  dependencies:
+    graceful-fs "^4.1.2"
+    imurmurhash "^0.1.4"
+    slide "^1.1.5"
+
+ws@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.1.tgz#082ddb6c641e85d4bb451f03d52f06eabdb1f018"
+  dependencies:
+    options ">=0.0.5"
+    ultron "1.0.x"
+
+wtf-8@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a"
+
+xdg-basedir@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-2.0.0.tgz#edbc903cc385fc04523d966a335504b5504d1bd2"
+  dependencies:
+    os-homedir "^1.0.0"
+
+xmldom@^0.1.19:
+  version "0.1.27"
+  resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9"
+
+xmlhttprequest-ssl@1.5.3:
+  version "1.5.3"
+  resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d"
+
+xtend@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
+
+y18n@^3.2.0:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
+
+yallist@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
+
+yam@0.0.18:
+  version "0.0.18"
+  resolved "https://registry.yarnpkg.com/yam/-/yam-0.0.18.tgz#e5cab771f0fc80ca599814cb9c269cb8bff00e2c"
+  dependencies:
+    findup "^0.1.5"
+    fs-extra "^0.16.3"
+    lodash.merge "^3.0.2"
+
+yargs@~3.10.0:
+  version "3.10.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1"
+  dependencies:
+    camelcase "^1.0.2"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    window-size "0.1.0"
+
+yargs@~3.27.0:
+  version "3.27.0"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.27.0.tgz#21205469316e939131d59f2da0c6d7f98221ea40"
+  dependencies:
+    camelcase "^1.2.1"
+    cliui "^2.1.0"
+    decamelize "^1.0.0"
+    os-locale "^1.4.0"
+    window-size "^0.1.2"
+    y18n "^3.2.0"
+
+yeast@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419"
diff --git a/dev-support/config-utils/diff_stack_properties.py b/dev-support/config-utils/diff_stack_properties.py
new file mode 100644
index 0000000..beef608
--- /dev/null
+++ b/dev-support/config-utils/diff_stack_properties.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+
+import xml.etree.ElementTree as ET
+
+COMMON = "common-services"
+STACKS = "stacks"
+CONFIG_DIR = "configuration"
+SERVICES_DIR = "services"
+
+SYMLINKS_TXT = "symlinks.txt"
+VERSIONS_TXT = "versions.txt"
+
+
+def main():
+  """ Parse arguments from user, check that all required args are passed in and start work."""
+
+  if len(sys.argv) != 3:
+    print "usage: diff_stack_properties.py [first_stack_dir] [second_stack_dir]"
+    sys.exit(-1)
+
+  args = sys.argv[1:]
+
+  if not os.path.exists(args[0]) or not os.path.exists(args[1]):
+    print "usage: diff_stack_properties.py [first_stack_dir] [second_stack_dir]"
+    sys.exit(-1)
+
+  do_work(args)
+
+
+def do_work(args):
+  """
+  Compare the common-services and stacks trees of two stack resource directories.
+  :param args: [new_stacks_dir, old_stacks_dir]
+  """
+  new_stacks = args[0]
+  old_stacks = args[1]
+
+  compare_common(new_stacks, old_stacks)
+
+  compare_stacks(new_stacks, old_stacks)
+
+
+def compare_stacks(new_stacks, old_stacks):
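+  # Walk stacks/<stack>/<version>, diffing the stack-level configuration dir and each service's configuration dir.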
+  print "#############[{}]#############".format(STACKS)
+  for stack in [stack for stack in os.listdir(os.path.join(new_stacks, STACKS)) if
+                os.path.isdir(os.path.join(new_stacks, STACKS, stack))]:
+    for version in os.listdir(os.path.join(new_stacks, STACKS, stack)):
+      if os.path.exists(os.path.join(new_stacks, STACKS, stack, version, CONFIG_DIR)):
+        diff = compare_config_dirs(os.path.join(new_stacks, STACKS, stack, version, CONFIG_DIR),
+                                   os.path.join(old_stacks, STACKS, stack, version, CONFIG_DIR))
+        if diff != "":
+          print "#############{}.{}#############".format(stack, version)
+          print diff
+      if os.path.exists(os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR)):
+        print "#############{}.{}#############".format(stack, version)
+        for service_name in os.listdir(os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR)):
+          new_configs_dir = os.path.join(new_stacks, STACKS, stack, version, SERVICES_DIR, service_name, CONFIG_DIR)
+          old_configs_dir = os.path.join(old_stacks, STACKS, stack, version, SERVICES_DIR, service_name, CONFIG_DIR)
+          diff = compare_config_dirs(new_configs_dir, old_configs_dir)
+          if diff != "":
+            print "=========={}==========".format(service_name)
+            print diff
+
+
+def compare_common(new_stacks, old_stacks):
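+  # Walk common-services/<service>/<version> and diff each configuration dir the same way.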
+  print "#############[{}]#############".format(COMMON)
+  for service_name in os.listdir(os.path.join(new_stacks, COMMON)):
+    for version in os.listdir(os.path.join(new_stacks, COMMON, service_name)):
+      new_configs_dir = os.path.join(new_stacks, COMMON, service_name, version, CONFIG_DIR)
+      old_configs_dir = os.path.join(old_stacks, COMMON, service_name, version, CONFIG_DIR)
+      diff = compare_config_dirs(new_configs_dir, old_configs_dir)
+      if diff != "":
+        print "=========={}.{}==========".format(service_name, version)
+        print diff
+
+
+def compare_config_dirs(new_configs_dir, old_configs_dir):
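+  # Diff files present in both dirs, report brand-new files, and flag added or deleted configuration dirs.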
+  result = ""
+  if os.path.exists(old_configs_dir) and os.path.exists(new_configs_dir):
+    for file_name in os.listdir(new_configs_dir):
+      old_file_name = os.path.join(old_configs_dir, file_name)
+      if os.path.exists(old_file_name):
+        result += compare_config_files(os.path.join(new_configs_dir, file_name),
+                                       os.path.join(old_configs_dir, file_name),
+                                       file_name)
+      else:
+        result += "new file {}\n".format(file_name)
+  else:
+    if os.path.exists(old_configs_dir) or os.path.exists(new_configs_dir):
+      if not os.path.exists(new_configs_dir):
+        result += "deleted configuration dir {}\n".format(new_configs_dir)
+      if not os.path.exists(old_configs_dir):
+        result += "new configuration dir {} with files {} \n".format(new_configs_dir, os.listdir(new_configs_dir))
+  return result
+
+
+def compare_config_files(new_configs, old_configs, file_name):
+  result = ""
+  if os.path.exists(old_configs):
+    old_configs_tree = ET.ElementTree(file=old_configs)
+    new_configs_tree = ET.ElementTree(file=new_configs)
+    for new_property in new_configs_tree.findall("property"):
+      name = new_property.find("name").text
+      if new_property.find("on-ambari-upgrade") is not None:
+        on_amb_upgrade = new_property.find("on-ambari-upgrade").get("add")
+      else:
+        on_amb_upgrade = None
+
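+      # Track <deleted> markers on both sides so only properties newly marked deleted are reported.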
+      deleted = None
+      old_deleted = None
+      if new_property.find("deleted") is not None:
+        deleted = new_property.find("deleted").text
+      old_property = old_configs_tree.find("property[name='{}']".format(name))
+
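+      # A property flagged on-ambari-upgrade add="true" that is absent from the old stack counts as an addition.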
+      if on_amb_upgrade == "true" and old_property is None:
+        result += "add {}\n".format(name)
+      else:
+        if old_property is not None and old_property.find("deleted") is not None:
+          old_deleted = old_property.find("deleted").text
+        if deleted == "true" and old_deleted != "true":
+          result += "deleted {}\n".format(name)
+    if result != "":
+      result = "------{}------\n".format(file_name) + result
+  else:
+    result += "{} not exists\n".format(old_configs, )
+  return result
+
+
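+# Example invocation (the resource paths below are illustrative only):
+#   python diff_stack_properties.py /new/ambari-server/resources /old/ambari-server/resources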
+if __name__ == "__main__":
+  main()
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 9b6d83e..6ed8322 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -267,7 +267,7 @@
     echo "Top-level trunk compilation is broken?"
     JIRA_COMMENT="$JIRA_COMMENT
 
-    {color:red}-1 patch{color}.  Top-level trunk compilation may be broken."
+    {color:red}-1 patch{color}.  Top-level [trunk compilation|$BUILD_URL/artifact/patch-work/trunkJavacWarnings.txt] may be broken."
     return 1
   fi
 
@@ -390,7 +390,7 @@
   if [[ $? != 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    {color:red}-1 javac{color}.  The patch appears to cause the build to fail."
+    {color:red}-1 javac{color}.  The patch appears to cause the [build to fail|$BUILD_URL/artifact/patch-work/patchJavacWarnings.txt]."
     return 2
   fi
   ### Compare trunk and patch javac warning numbers
@@ -665,6 +665,7 @@
     done
 
     test_logfile=$PATCH_DIR/testrun_${module_suffix}.txt
+    test_logfile_url=$BUILD_URL/artifact/patch-work/testrun_${module_suffix}.txt
     echo "  Running tests in $module"
 
     # Skip java tests if this module did not have changes to java files
@@ -691,7 +692,7 @@
 ${module_failed_tests}"
     fi
     if [[ $test_build_result != 0 && -z "$module_failed_tests" && -z "$module_test_timeouts" ]] ; then
-      failed_test_builds="$module $failed_test_builds"
+      failed_test_builds="[$module|$test_logfile_url] $failed_test_builds"
     fi
     cd -
   done
@@ -995,8 +996,6 @@
 fi
 checkInjectSystemFaults
 (( RESULT = RESULT + $? ))
-JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
-$JIRA_COMMENT_FOOTER"
 
 submitJiraComment $RESULT
 cleanupAndExit $RESULT
diff --git a/docs/pom.xml b/docs/pom.xml
index 225359f..6553c83 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -1089,6 +1089,18 @@
             </organization>
         </developer>       
         <developer>
+            <id>vsairam</id>
+            <name>Venkata Sairam Lanka</name>
+            <email>vsairam@apache.org</email>
+            <timezone>+5.5</timezone>
+            <roles>
+                <role>Committer</role>
+            </roles>
+            <organization>
+                Hortonworks
+            </organization>
+        </developer>
+        <developer>
             <id>xiwang</id>
             <name>Xi Wang</name>
             <email>xiwang@apache.org</email>
diff --git a/pom.xml b/pom.xml
index 1cf13f4..e0d0373 100644
--- a/pom.xml
+++ b/pom.xml
@@ -399,6 +399,7 @@
             <exclude>ambari-metrics/**/.*/**</exclude>
             <!-- generated DDL-->
             <exclude>**/createDDL.jdbc</exclude>
+            <exclude>**/yarn.lock</exclude>
           </excludes>
         </configuration>
         <executions>